Commit 6f8f3f78 authored by Ole Streicher

New upstream version 1.1.2

parent 29cea210
Metadata-Version: 1.1
Name: astropy
Version: 1.1.1
Version: 1.1.2
Summary: Community-developed python astronomy tools
Home-page: http://astropy.org
Author: The Astropy Developers
Author-email: astropy.team@gmail.com
License: BSD
Download-URL: http://pypi.python.org/packages/source/a/astropy/astropy-1.1.1.tar.gz
Download-URL: http://pypi.python.org/packages/source/a/astropy/astropy-1.1.2.tar.gz
Description:
Astropy is a package intended to contain core functionality and some
common tools needed for performing astronomy and astrophysics research with
Python. It also provides an index for other astronomy packages and tools for
managing them.
Keywords: astronomy,astrophysics,cosmology,space,science,units,table,wcs,vo,samp,coordinate,fits,modeling,models,fitting,ascii
Platform: UNKNOWN
Classifier: Intended Audience :: Science/Research
Classifier: License :: OSI Approved :: BSD License
......
......@@ -11,6 +11,8 @@ from .core import Kernel, Kernel1D, Kernel2D, MAX_NORMALIZATION
from ..utils.exceptions import AstropyUserWarning
from ..utils.console import human_file_size
from astropy import units as u
# Disabling all doctests in this module until a better way of handling warnings
# in doctests can be determined
......@@ -22,11 +24,12 @@ def convolve(array, kernel, boundary='fill', fill_value=0.,
'''
Convolve an array with a kernel.
This routine differs from `scipy.ndimage.filters.convolve` because
This routine differs from `scipy.ndimage.convolve` because
it includes a special treatment for ``NaN`` values. Rather than
including ``NaN``s in the convolution calculation, which causes large
``NaN`` holes in the convolved image, ``NaN`` values are replaced with
interpolated values using the kernel as an interpolation function.
including ``NaN`` values in the array in the convolution calculation, which
causes large ``NaN`` holes in the convolved array, ``NaN`` values are
replaced with interpolated values using the kernel as an interpolation
function.
Parameters
----------
......@@ -50,7 +53,7 @@ def convolve(array, kernel, boundary='fill', fill_value=0.,
Set values outside the array to the nearest ``array``
value.
fill_value : float, optional
The value to use outside the array when using boundary='fill'
The value to use outside the array when using ``boundary='fill'``
normalize_kernel : bool, optional
Whether to normalize the kernel prior to convolving
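A minimal sketch of the NaN treatment described above (array and kernel values are invented for illustration):

    import numpy as np
    from astropy.convolution import convolve

    arr = np.array([1.0, np.nan, 3.0])
    kernel = np.array([1.0, 1.0, 1.0])
    # The NaN is not propagated: it is replaced by the kernel-weighted
    # average of its valid neighbors before convolving.
    result = convolve(arr, kernel)
    assert not np.isnan(result).any()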
......@@ -221,7 +224,7 @@ def convolve(array, kernel, boundary='fill', fill_value=0.,
def convolve_fft(array, kernel, boundary='fill', fill_value=0, crop=True,
return_fft=False, fft_pad=True, psf_pad=False,
return_fft=False, fft_pad=None, psf_pad=None,
interpolate_nan=False, quiet=False, ignore_edge_zeros=False,
min_wt=0.0, normalize_kernel=False, allow_huge=False,
fftn=np.fft.fftn, ifftn=np.fft.ifftn,
......@@ -284,10 +287,13 @@ def convolve_fft(array, kernel, boundary='fill', fill_value=0, crop=True,
Other Parameters
----------------
fft_pad : bool, optional
Default on. Zero-pad image to the nearest 2^n
Default on. Zero-pad image to the nearest 2^n. With
``boundary='wrap'``, this will be disabled.
psf_pad : bool, optional
Default off. Zero-pad image to be at least the sum of the image sizes
(in order to avoid edge-wrapping when smoothing)
Zero-pad image to be at least the sum of the image sizes to avoid
edge-wrapping when smoothing. This is enabled by default with
``boundary='fill'``, but it can be overridden with a boolean option.
``boundary='wrap'`` and ``psf_pad=True`` are not compatible.
crop : bool, optional
Default on. Return an image of the size of the largest input image.
If the images are asymmetric in opposite directions, will return the
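A short usage sketch of the new padding defaults (image and kernel shapes are illustrative):

    import numpy as np
    from astropy.convolution import convolve_fft

    image = np.random.rand(32, 32)
    kernel = np.ones((9, 9)) / 81.0
    # boundary='fill' now enables psf_pad (and fft_pad) by default,
    # trading a larger FFT for freedom from edge-wrapping.
    smoothed = convolve_fft(image, kernel, boundary='fill')
    # boundary='wrap' forces both off; passing psf_pad=True or
    # fft_pad=True here raises a ValueError.
    wrapped = convolve_fft(image, kernel, boundary='wrap')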
......@@ -329,6 +335,12 @@ def convolve_fft(array, kernel, boundary='fill', fill_value=0, crop=True,
If crop is not set, returns the image, but with the fft-padded size
instead of the input size
Notes
-----
With psf_pad=True and a large PSF, the resulting data can become very
large and consume a lot of memory. See
https://github.com/astropy/astropy/pull/4366 for further detail.
Examples
--------
>>> convolve_fft([1, 0, 3], [1, 1, 1])
......@@ -389,11 +401,11 @@ def convolve_fft(array, kernel, boundary='fill', fill_value=0, crop=True,
kernshape = kernel.shape
array_size_B = (np.product(arrayshape, dtype=np.int64) *
np.dtype(complex_dtype).itemsize)
if array_size_B > 1024**3 and not allow_huge:
np.dtype(complex_dtype).itemsize)*u.byte
if array_size_B > 1*u.GB and not allow_huge:
raise ValueError("Size Error: Arrays will be %s. Use "
"allow_huge=True to override this exception."
% human_file_size(array_size_B))
% human_file_size(array_size_B.to(u.byte).value))
# mask catching - masks must be turned into NaNs for use later
if np.ma.is_masked(array):
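The unit-aware size check above can be reproduced by hand; a minimal sketch, assuming a complex128 FFT buffer:

    import numpy as np
    from astropy import units as u

    arrayshape = (8192, 8192)
    array_size_B = (np.prod(arrayshape, dtype=np.int64) *
                    np.dtype(np.complex128).itemsize) * u.byte
    print(array_size_B.to(u.GB))  # ~1.07 GB, just over the 1*u.GB threshold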
......@@ -406,12 +418,12 @@ def convolve_fft(array, kernel, boundary='fill', fill_value=0, crop=True,
kernel[mask] = np.nan
# NaN and inf catching
nanmaskarray = np.isnan(array) + np.isinf(array)
nanmaskarray = np.isnan(array) | np.isinf(array)
array[nanmaskarray] = 0
nanmaskkernel = np.isnan(kernel) + np.isinf(kernel)
nanmaskkernel = np.isnan(kernel) | np.isinf(kernel)
kernel[nanmaskkernel] = 0
if ((nanmaskarray.sum() > 0 or nanmaskkernel.sum() > 0) and
not interpolate_nan and not quiet):
if (not interpolate_nan and not quiet and (np.any(nanmaskarray) or
np.any(nanmaskkernel))):
warnings.warn("NOT ignoring NaN values even though they are present "
" (they are treated as 0)", AstropyUserWarning)
......@@ -442,12 +454,27 @@ def convolve_fft(array, kernel, boundary='fill', fill_value=0, crop=True,
"equivalent to the convolve boundary='fill'. There is "
"no FFT equivalent to convolve's "
"zero-if-kernel-leaves-boundary", AstropyUserWarning)
psf_pad = True
if psf_pad is None:
psf_pad = True
if fft_pad is None:
fft_pad = True
elif boundary == 'fill':
# create a boundary region at least as large as the kernel
psf_pad = True
if psf_pad is False:
warnings.warn("psf_pad was set to {0}, which overrides the "
"boundary='fill' setting.".format(psf_pad),
AstropyUserWarning)
else:
psf_pad = True
if fft_pad is None:
# default is 'True' according to the docstring
fft_pad = True
elif boundary == 'wrap':
if psf_pad:
raise ValueError("With boundary='wrap', psf_pad cannot be enabled.")
psf_pad = False
if fft_pad:
raise ValueError("With boundary='wrap', fft_pad cannot be enabled.")
fft_pad = False
fill_value = 0 # force zero; it should not be used
elif boundary == 'extend':
......@@ -474,6 +501,14 @@ def convolve_fft(array, kernel, boundary='fill', fill_value=0, crop=True,
newshape = np.array([np.max([imsh, kernsh])
for imsh, kernsh in zip(arrayshape, kernshape)])
# perform a second check after padding
array_size_C = (np.product(newshape, dtype=np.int64) *
np.dtype(complex_dtype).itemsize)*u.byte
if array_size_C > 1*u.GB and not allow_huge:
raise ValueError("Size Error: Arrays will be %s. Use "
"allow_huge=True to override this exception."
% human_file_size(array_size_C.to(u.byte).value))
# For future reference, this can be used to predict "almost exactly"
# how much *additional* memory will be used.
# size * (array + kernel + kernelfft + arrayfft +
......
......@@ -540,7 +540,7 @@ class MexicanHat1DKernel(Kernel1D):
This kernel is derived from a normalized Gaussian function by
computing the second derivative. This results in an amplitude
at the kernel's center of 1. / (sqrt(2 * pi) * width ** 3). The
normalization is the same as for `scipy.ndimage.filters.gaussian_laplace`,
normalization is the same as for `scipy.ndimage.gaussian_laplace`,
except for a minus sign.
Parameters
......@@ -610,7 +610,7 @@ class MexicanHat2DKernel(Kernel2D):
This kernel is derived from a normalized Gaussian function by
computing the second derivative. This results in an amplitude
at the kernel's center of 1. / (pi * width ** 4). The normalization
is the same as for `scipy.ndimage.filters.gaussian_laplace`, except
is the same as for `scipy.ndimage.gaussian_laplace`, except
for a minus sign.
Parameters
......
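The quoted peak amplitude follows from differentiating a unit-area Gaussian twice; a quick numerical check for the 1-D case (width chosen arbitrarily):

    import numpy as np

    width = 2.0
    x = np.array([-1e-4, 0.0, 1e-4])
    gauss = np.exp(-x**2 / (2 * width**2)) / (np.sqrt(2 * np.pi) * width)
    # central-difference second derivative at x = 0, sign flipped
    amp = -(gauss[0] - 2 * gauss[1] + gauss[2]) / (x[1] - x[0])**2
    print(amp, 1 / (np.sqrt(2 * np.pi) * width**3))  # both ~0.0499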
......@@ -103,8 +103,8 @@ class CdsHeader(core.BaseHeader):
(?P<end> \d+) \s+
(?P<format> [\w.]+) \s+
(?P<units> \S+) \s+
(?P<name> \S+) \s+
(?P<descr> \S.+)""",
(?P<name> \S+)
(\s+ (?P<descr> \S.*))?""",
re.VERBOSE)
cols = []
......@@ -120,13 +120,14 @@ class CdsHeader(core.BaseHeader):
col.unit = match.group('units')
if col.unit == '---':
col.unit = None # "---" is the marker for no unit in CDS table
col.description = match.group('descr').strip()
col.description = (match.group('descr') or '').strip()
col.raw_type = match.group('format')
col.type = self.get_col_type(col)
match = re.match(
r'\? (?P<equal> =)? (?P<nullval> \S*)', col.description, re.VERBOSE)
r'\? (?P<equal> =)? (?P<nullval> \S*) (\s+ (?P<descriptiontext> \S.*))?', col.description, re.VERBOSE)
if match:
col.description=(match.group('descriptiontext') or '').strip()
if issubclass(col.type, core.FloatType):
fillval = 'nan'
else:
......
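A minimal sketch of how the new null-value pattern splits a CDS description (the sample string mirrors the test ReadMe below):

    import re

    descr = '?=-9.9 Equivalent width (in mA)'
    m = re.match(
        r'\? (?P<equal> =)? (?P<nullval> \S*) (\s+ (?P<descriptiontext> \S.*))?',
        descr, re.VERBOSE)
    print(m.group('nullval'))          # -9.9
    print(m.group('descriptiontext'))  # Equivalent width (in mA)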
......@@ -36,6 +36,104 @@ FORMAT_CLASSES = {}
# Similar dictionary for fast readers
FAST_CLASSES = {}
class CsvWriter(object):
"""
Internal class to replace the csv writer ``writerow`` and ``writerows``
functions so that in the case of ``delimiter=' '`` and
``quoting=csv.QUOTE_MINIMAL``, the output field value is quoted for empty
fields (when value == '').
This changes the API slightly in that the writerow() and writerows()
methods return the output written string instead of the length of
that string.
Examples
--------
>>> from astropy.io.ascii.core import CsvWriter
>>> writer = CsvWriter(delimiter=' ')
>>> print(writer.writerow(['hello', '', 'world']))
hello "" world
"""
# Random 16-character string that gets injected instead of any
# empty fields and is then replaced post-write with doubled-quotechar.
# Created with:
# ''.join(random.choice(string.printable[:90]) for _ in range(16))
replace_sentinel = '2b=48Av%0-V3p>bX'
def __init__(self, csvfile=None, **kwargs):
self.csvfile = csvfile
# Temporary StringIO for catching the real csv.writer() object output
self.temp_out = StringIO()
self.writer = csv.writer(self.temp_out, **kwargs)
dialect = self.writer.dialect
self.quotechar2 = dialect.quotechar * 2
self.quote_empty = (dialect.quoting == csv.QUOTE_MINIMAL) and (dialect.delimiter == ' ')
def writerow(self, values):
"""
Similar to csv.writer.writerow but with the custom quoting behavior.
Returns the written string instead of the length of that string.
"""
has_empty = False
# If QUOTE_MINIMAL and space-delimited then replace empty fields with
# the sentinel value.
if self.quote_empty:
for i, value in enumerate(values):
if value == '':
has_empty = True
values[i] = self.replace_sentinel
return self._writerow(self.writer.writerow, values, has_empty)
def writerows(self, values_list):
"""
Similar to csv.writer.writerows but with the custom quoting behavior.
Returns the written string instead of the length of that string.
"""
has_empty = False
# If QUOTE_MINIMAL and space-delimited then replace empty fields with
# the sentinel value.
if self.quote_empty:
for values in values_list:
for i, value in enumerate(values):
if value == '':
has_empty = True
values[i] = self.replace_sentinel
return self._writerow(self.writer.writerows, values_list, has_empty)
def _writerow(self, writerow_func, values, has_empty):
"""
Call ``writerow_func`` (either writerow or writerows) with ``values``.
If it has empty fields that have been replaced then change those
sentinel strings back to quoted empty strings, e.g. ``""``.
"""
# Clear the temporary StringIO buffer that self.writer writes into and
# then call the real csv.writer().writerow or writerows with values.
self.temp_out.seek(0)
self.temp_out.truncate()
writerow_func(values)
row_string = self.temp_out.getvalue()
if self.quote_empty and has_empty:
row_string = re.sub(self.replace_sentinel, self.quotechar2, row_string)
# If self.csvfile is defined, write the output. In practice the pure
# Python writer calls with csvfile=None, while the fast writer calls with
# a file-like object.
if self.csvfile:
self.csvfile.write(row_string)
return row_string
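For contrast, the stdlib writer emits nothing for an empty field, so a space-delimited row becomes ambiguous on re-read; this is what the sentinel round-trip above fixes:

    import csv
    from io import StringIO

    out = StringIO()
    csv.writer(out, delimiter=' ').writerow(['hello', '', 'world'])
    print(repr(out.getvalue()))  # 'hello  world\r\n' -- empty field lost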
class MaskedConstant(numpy.ma.core.MaskedConstant):
"""A trivial extension of numpy.ma.masked
......@@ -344,21 +442,17 @@ class DefaultSplitter(BaseSplitter):
delimiter = ' ' if self.delimiter is None else str(self.delimiter)
if self.csv_writer is None:
self.csv_writer = csv.writer(self.csv_writer_out,
delimiter=delimiter,
doublequote=self.doublequote,
escapechar=escapechar,
quotechar=quotechar,
quoting=self.quoting,
lineterminator='',
)
self.csv_writer_out.seek(0)
self.csv_writer_out.truncate()
self.csv_writer = CsvWriter(delimiter=delimiter,
doublequote=self.doublequote,
escapechar=escapechar,
quotechar=quotechar,
quoting=self.quoting,
lineterminator='')
if self.process_val:
vals = [self.process_val(x) for x in vals]
self.csv_writer.writerow(vals)
out = self.csv_writer.writerow(vals)
return self.csv_writer_out.getvalue()
return out
def _replace_tab_with_space(line, escapechar, quotechar):
......
......@@ -912,8 +912,8 @@ cdef class FastWriter:
if col.format is None and not (six.PY3 and col.dtype.kind == 'S'):
self.format_funcs.append(None)
else:
self.format_funcs.append(pprint._format_funcs.get(col.format,
auto_format_func))
self.format_funcs.append(pprint._format_funcs.get(
col.format, pprint._auto_format_func))
# col is a numpy.ndarray, so we convert it to
# an ordinary list because csv.writer will call
# np.array_str() on each numpy value, which is
......@@ -952,13 +952,13 @@ cdef class FastWriter:
if not hasattr(output, 'write'): # output is a filename
output = open(output, 'w')
opened_file = True # remember to close file afterwards
writer = csv.writer(output,
delimiter=self.delimiter,
doublequote=True,
escapechar=None,
quotechar=self.quotechar,
quoting=csv.QUOTE_MINIMAL,
lineterminator=os.linesep)
writer = core.CsvWriter(output,
delimiter=self.delimiter,
doublequote=True,
escapechar=None,
quotechar=self.quotechar,
quoting=csv.QUOTE_MINIMAL,
lineterminator=os.linesep)
self._write_header(output, writer, header_output, output_types)
# Split rows into N-sized chunks, since we don't want to
......@@ -1048,38 +1048,3 @@ def get_fill_values(fill_values, read=True):
return (fill_values, fill_empty)
else:
return fill_values # cache for empty values doesn't matter for writing
def auto_format_func(format_, val):
"""
Mimics pprint._auto_format_func for non-numpy values.
"""
if six.callable(format_):
format_func = lambda format_, val: format_(val)
try:
out = format_func(format_, val)
if not isinstance(out, six.string_types):
raise ValueError('Format function for value {0} returned {1} instead of string type'
.format(val, type(val)))
except Exception as err:
raise ValueError('Format function for value {0} failed: {1}'
.format(val, err))
else:
try:
# Convert val to Python object with tolist(). See
# https://github.com/astropy/astropy/issues/148#issuecomment-3930809
out = format_.format(val)
# Require that the format statement actually did something
if out == format_:
raise ValueError
format_func = lambda format_, val: format_.format(val)
except: # Not sure what exceptions might be raised
try:
out = format_ % val
if out == format_:
raise ValueError
format_func = lambda format_, val: format_ % val
except:
raise ValueError('Unable to parse format string {0}'
.format(format_))
pprint._format_funcs[format_] = format_func
return out
......@@ -110,7 +110,7 @@ class LatexHeader(core.BaseHeader):
def start_line(self, lines):
line = find_latex_line(lines, self.header_start)
if line:
if line is not None:
return line + 1
else:
return None
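The old check was a truthiness bug: find_latex_line returns a 0-based line index (or None), so a header found on the very first line made ``if line:`` fail. A minimal illustration:

    line = 0                 # header found at index 0
    assert not bool(line)    # old check: 0 is falsy, header missed
    assert line is not None  # new check handles index 0 correctly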
......@@ -384,7 +384,14 @@ class AASTexData(LatexData):
def write(self, lines):
lines.append(self.data_start)
lines_length_initial = len(lines)
core.BaseData.write(self, lines)
# Remove the trailing whitespace and \\ appended to the last data row,
# which would otherwise create an extra blank line at the end.
if len(lines) > lines_length_initial:
# we compile separately because py2.6 doesn't have a flags keyword in re.sub
re_final_line = re.compile(r'\s* \\ \\ \s* $', flags=re.VERBOSE)
lines[-1] = re.sub(re_final_line, '', lines[-1])
lines.append(self.data_end)
add_dictval_to_list(self.latex, 'tablefoot', lines)
lines.append(r'\end{' + self.latex['tabletype'] + r'}')
......
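A quick check of the row-terminator stripping above (regex copied from the hunk):

    import re

    re_final_line = re.compile(r'\s* \\ \\ \s* $', flags=re.VERBOSE)
    print(re.sub(re_final_line, '', r'a & 1 \\ '))  # prints 'a & 1'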
......@@ -53,6 +53,7 @@ def get_package_data():
't/latex1.tex',
't/latex1.tex.gz',
't/latex2.tex',
't/latex3.tex',
't/nls1_stackinfo.dbout',
't/no_data_cds.dat',
't/no_data_daophot.dat',
......@@ -80,6 +81,8 @@ def get_package_data():
't/simple_csv.csv',
't/simple_csv_missing.csv',
't/fixed_width_2_line.txt',
't/cds/description/ReadMe',
't/cds/description/table.dat',
]
}
......
J/A+A/511/A56 Abundances of five open clusters (Pancino+, 2010)
================================================================================
Chemical abundance analysis of the open clusters Cr 110, NGC 2420, NGC 7789,
and M 67 (NGC 2682).
Pancino E., Carrera R., Rossetti, E., Gallart C.
<Astron. Astrophys. 511, A56 (2010)>
=2010A&A...511A..56P
================================================================================
ADC_Keywords: Clusters, open ; Stars, giant ; Equivalent widths ; Spectroscopy
Keywords: stars: abundances - Galaxy: disk -
open clusters and associations: general
Abstract:
The present number of Galactic open clusters that have high resolution
abundance determinations, not only of [Fe/H], but also of other key
elements, is largely insufficient to enable a clear modeling of the
Galactic disk chemical evolution. To increase the number of Galactic
open clusters with high quality measurements, we obtained high
resolution (R~30000), high quality (S/N~50-100 per pixel), echelle
spectra with the fiber spectrograph FOCES, at Calar Alto, Spain, for
three red clump stars in each of five Open Clusters. We used the
classical equivalent width analysis method to obtain accurate
abundances of sixteen elements: Al, Ba, Ca, Co, Cr, Fe, La, Mg, Na,
Nd, Ni, Sc, Si, Ti, V, and Y. We also derived the oxygen abundance
using spectral synthesis of the 6300{AA} forbidden line.
Description:
Atomic data and equivalent widths for 15 red clump giants in 5 open
clusters: Cr 110, NGC 2099, NGC 2420, M 67, NGC 7789.
File Summary:
--------------------------------------------------------------------------------
FileName    Lrecl  Records  Explanations
--------------------------------------------------------------------------------
ReadMe         80        .  This file
table1.dat    103       15  Observing logs and programme stars information
table5.dat     56     5265  Atomic data and equivalent widths
--------------------------------------------------------------------------------
See also:
J/A+A/455/271 : Abundances of red giants in NGC 6441 (Gratton+, 2006)
J/A+A/464/953 : Abundances of red giants in NGC 6441 (Gratton+, 2007)
J/A+A/505/117 : Abund. of red giants in 15 globular clusters (Carretta+, 2009)
Byte-by-byte Description of file: table.dat
--------------------------------------------------------------------------------
Bytes   Format Units  Label    Explanations
--------------------------------------------------------------------------------
 1-  7  A7    ---     Cluster  Cluster name
 9- 12  I4    ---     Star
14- 20  F7.2  0.1nm   Wave     wave
                               ? Wavelength in Angstroms
22- 23  A2    ---     El       a
24      I1    ---     ion      ?=0
                               - Ionization stage (1 for neutral element)
26- 30  F5.2  eV      chiEx    Excitation potential
32- 37  F6.2  ---     loggf    Logarithm of the oscillator strength
39- 43  F5.1  0.1pm   EW       ?=-9.9 Equivalent width (in mA)
46- 49  F4.1  0.1pm   e_EW     ?=-9.9 rms uncertainty on EW
51- 56  F6.3  ---     Q        ?=-9.999 DAOSPEC quality parameter Q
                               (large values are bad)
--------------------------------------------------------------------------------
Acknowledgements:
Elena Pancino, elena.pancino(at)oabo.inaf.it
================================================================================
(End) Elena Pancino [INAF-OABo, Italy], Patricia Vannier [CDS] 23-Nov-2009
Cr110   2108 6696.79 Al1  4.02  -1.42  29.5   2.2  0.289
Cr110   2108 6698.67 Al1  3.14  -1.65  58.0   2.0  0.325
\begin{tabular}{lrr}\hline
cola & colb & colc\\
\hline
a & 1 & 2\\
b & 3 & 4\\
\hline
\end{tabular}
......@@ -21,6 +21,21 @@ def read_table3(readme, data):
return ascii.read(data, readme=readme)
def test_description():
readme = 't/cds/description/ReadMe'
data = 't/cds/description/table.dat'
for read_table in (read_table1, read_table2, read_table3):
table = read_table(readme, data)
assert_equal(len(table), 2)
assert_equal(table['Cluster'].description, 'Cluster name')
assert_equal(table['Star'].description, '')
assert_equal(table['Wave'].description, 'wave? Wavelength in Angstroms')
assert_equal(table['El'].description, 'a')
assert_equal(table['ion'].description, '- Ionization stage (1 for neutral element)')
assert_equal(table['EW'].description, 'Equivalent width (in mA)')
assert_equal(table['Q'].description, 'DAOSPEC quality parameter Q(large values are bad)')
def test_multi_header():
readme = 't/cds/multi/ReadMe'
data = 't/cds/multi/lhs2065.dat'
......@@ -137,3 +152,4 @@ if __name__ == "__main__": # run from main directory; not from test/
test_header_from_readme()
test_multi_header()
test_glob_header()
test_description()
......@@ -780,6 +780,10 @@ def get_testfiles(name=None):
'name': 't/latex2.tex',
'nrows': 3,
'opts': {'Reader': ascii.AASTex}},
{'cols': ('cola', 'colb', 'colc'),
'name': 't/latex3.tex',
'nrows': 2,
'opts': {'Reader': ascii.Latex}},
{'cols': ('Col1', 'Col2', 'Col3', 'Col4'),
'name': 't/fixed_width_2_line.txt',
'nrows': 2,
......
......@@ -98,7 +98,7 @@ ID & XCENTER & YCENTER & MAG & MERR & MSKY & NITER & SHARPNESS & CHI & PIER & PE
\\tablehead{\\colhead{ID} & \\colhead{XCENTER} & \\colhead{YCENTER} & \\colhead{MAG} & \\colhead{MERR} & \\colhead{MSKY} & \\colhead{NITER} & \\colhead{SHARPNESS} & \\colhead{CHI} & \\colhead{PIER} & \\colhead{PERROR}\\\\ \\colhead{ } & \\colhead{pixels} & \\colhead{pixels} & \\colhead{magnitudes} & \\colhead{magnitudes} & \\colhead{counts} & \\colhead{ } & \\colhead{ } & \\colhead{ } & \\colhead{ } & \\colhead{perrors}}
\\startdata
14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error
\\enddata
\\end{deluxetable}
"""
......@@ -113,7 +113,7 @@ ID & XCENTER & YCENTER & MAG & MERR & MSKY & NITER & SHARPNESS & CHI & PIER & PE
\\tablehead{\\colhead{ID} & \\colhead{XCENTER} & \\colhead{YCENTER} & \\colhead{MAG} & \\colhead{MERR} & \\colhead{MSKY} & \\colhead{NITER} & \\colhead{SHARPNESS} & \\colhead{CHI} & \\colhead{PIER} & \\colhead{PERROR}\\\\ \\colhead{ } & \\colhead{[pixel]} & \\colhead{pixels} & \\colhead{[mag]} & \\colhead{magnitudes} & \\colhead{counts} & \\colhead{ } & \\colhead{ } & \\colhead{ } & \\colhead{ } & \\colhead{perrors}}
\\startdata
14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error
\\enddata
\\end{deluxetable*}
"""
......@@ -468,6 +468,17 @@ def test_write_comments(fast_writer):
expected = ['a b c', '1 2 3']
assert out.getvalue().splitlines() == expected
@pytest.mark.parametrize("fast_writer", [True, False])
@pytest.mark.parametrize("fmt", ['%0.1f', '.1f', '0.1f', '{0:0.1f}'])
def test_write_format(fast_writer, fmt):
"""Check different formats for a column."""
data = ascii.read('#c1\n # c2\t\na,b,c\n# c3\n1.11,2.22,3.33')
out = StringIO()
expected = ['# c1', '# c2', '# c3', 'a b c', '1.1 2.22 3.33']
data['a'].format = fmt
ascii.write(data, out, format='basic', fast_writer=fast_writer)
assert out.getvalue().splitlines() == expected
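The four parametrized specs exercise both formatting paths (new-style first, %-style as a fallback) and all render the same value; a plain-Python check:

    val = 1.11
    print('%0.1f' % val)           # 1.1
    print('{0:0.1f}'.format(val))  # 1.1
    print(format(val, '.1f'))      # 1.1
    print(format(val, '0.1f'))     # 1.1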
@pytest.mark.parametrize("fast_writer", [True, False])
def test_strip_names(fast_writer):
"""Names should be stripped of whitespace by default."""
......@@ -493,7 +504,7 @@ def test_latex_units():
\\tablehead{\\colhead{date} & \\colhead{NUV exp.time}\\\\ \\colhead{ } & \\colhead{s}}
\\startdata
a & 1 \\\\
b & 2 \\\\
b & 2
\\enddata
\\end{table}
'''.replace('\n', os.linesep)
......@@ -534,3 +545,18 @@ def test_byte_string_output(fast_writer):
out = StringIO()
ascii.write(t, out, fast_writer=fast_writer)
assert out.getvalue().splitlines() == ['col0', 'Hello', 'World']
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_quoted_empty_field(fast_writer):
"""
Test the fix for #4350 where byte strings were output with a
leading `b` on Py3.
"""
t = table.Table([['Hello', ''], ['', '']], dtype=['S10', 'S10'])
out = StringIO()
ascii.write(t, out, fast_writer=fast_writer)
assert out.getvalue().splitlines() == ['col0 col1', 'Hello ""', '"" ""']
out = StringIO()
ascii.write(t, out, fast_writer=fast_writer, delimiter=',')
assert out.getvalue().splitlines() == ['col0,col1', 'Hello,', ',']
......@@ -321,6 +321,9 @@ class _FormatX(str):
obj.repeat = repeat
return obj