Newer
Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
import os
import glob
import sys
from os import path
import numpy as np
import astropy.units as u
from astropy.io import fits
from scipy.stats import norm
from scipy.interpolate import interp1d
# Absolute directory of this module, used to locate bundled data files
# (e.g. the filter response curves read from data/filter/ in calibrate()).
data_path = os.path.dirname(__file__)
def readcol(filename, **kwargs):
    """
    Tries to reproduce the simplicity of the IDL procedure READCOL.

    Given a file with some columns of strings and columns of numbers, this
    function extracts the columns from the file and places them in Numpy
    vectors with the proper type::

        name, mass = readcol('prova.txt', usecols=(0, 2))

    where the file prova.txt contains the following::

        ##################
        # name radius mass
        ##################
          abc   25.   36.
          cde   45.   56.
          rdh   55.   57.
          qtr   75.   46.
          hdt   47.   56.
        ##################

    This function is a wrapper for numpy.genfromtxt() and accepts the same
    input. See the following website for the full documentation
    https://docs.scipy.org/doc/numpy/reference/generated/numpy.genfromtxt.html

    :param filename: file name, file object, or iterable of lines —
        anything accepted by numpy.genfromtxt().
    :param kwargs: extra keyword arguments passed through to
        numpy.genfromtxt() (e.g. usecols, skip_header, ...).
    :return: list of Numpy vectors, one per requested column.
    """
    f = np.genfromtxt(filename, dtype=None, **kwargs)
    t = type(f[0])
    if t is np.ndarray or t is np.void:  # array or structured array
        # Transpose the rows into one materialized array per column.
        # (A bare map() object here would be lazy and could be returned
        # un-consumed; build the list eagerly instead.)
        f = [np.array(col) for col in zip(*f)]
    # In Python 3 all strings (e.g. name='NGC1023') are Unicode by default,
    # but genfromtxt() returns byte strings b'NGC1023' for non-numeric
    # columns. Convert byte-string ('S') columns to Unicode so the test
    # a == 'NGC1023' gives True as expected. Use sys.version_info for the
    # version check: comparing the sys.version string lexicographically is
    # fragile (sys.version is a full free-form version/build string).
    if sys.version_info[0] >= 3:
        f = [v.astype(str) if v.dtype.char == 'S' else v for v in f]
    return f
def log_rebin(lamRange, spec, oversample=False, velscale=None, flux=False):
    """
    Logarithmically rebin a spectrum, while rigorously conserving the flux.
    Basically the photons in the spectrum are simply redistributed according
    to a new grid of pixels, with non-uniform size in the spectral direction.

    When the flux keyword is set, this program performs an exact integration
    of the original spectrum, assumed to be a step function within the
    linearly-spaced pixels, onto the new logarithmically-spaced pixels.
    The output was tested to agree with the analytic solution.

    :param lamRange: two elements vector containing the central wavelength
        of the first and last pixels in the spectrum, which is assumed
        to have constant wavelength scale! E.g. from the values in the
        standard FITS keywords: LAMRANGE = CRVAL1 + [0, CDELT1*(NAXIS1-1)].
        It must be LAMRANGE[0] < LAMRANGE[1].
    :param spec: input spectrum (one-dimensional vector).
    :param oversample: oversampling factor; can be used not to lose spectral
        resolution, especially for extended wavelength ranges, and to avoid
        aliasing. Default False ==> same number of output pixels as input.
    :param velscale: velocity scale in km/s per pixel. If this variable is
        not defined, then it will contain in output the velocity scale.
        If this variable is defined by the user it will be used
        to set the output number of pixels and wavelength scale.
    :param flux: (boolean) True to preserve total flux. In this case the
        log rebinning changes the pixels flux in proportion to their
        dLam, so the following commands will show large differences
        between the spectral shape before and after log_rebin::

            plt.plot(exp(logLam), specNew)  # Plot log-rebinned spectrum
            plt.plot(np.linspace(lamRange[0], lamRange[1], spec.size), spec)

        By default, when this is False, the above two lines produce
        two spectra that almost perfectly overlap each other.
    :return: [specNew, logLam, velscale]
    """
    lamRange = np.asarray(lamRange)
    assert len(lamRange) == 2, 'lamRange must contain two elements'
    assert lamRange[0] < lamRange[1], 'It must be lamRange[0] < lamRange[1]'
    s = spec.shape
    assert len(s) == 1, 'input spectrum must be a vector'
    n = s[0]
    if oversample:
        m = int(n*oversample)  # Number of output pixels with oversampling
    else:
        m = int(n)             # Same number of output pixels as input

    dLam = np.diff(lamRange)/(n - 1.)     # Assume constant dLam
    lim = lamRange/dLam + [-0.5, 0.5]     # All in units of dLam
    borders = np.linspace(*lim, num=n+1)  # Linearly spaced borders of the input pixels
    logLim = np.log(lim)

    c = 299792.458                        # Speed of light in km/s
    if velscale is None:                  # Velocity scale NOT set by user: derive it
        velscale = np.diff(logLim)/m*c    # Only for output
    else:                                 # Velocity scale set by user: fixes output sampling
        logScale = velscale/c
        m = int(np.diff(logLim)/logScale)  # Number of output pixels
        logLim[1] = logLim[0] + m*logScale

    newBorders = np.exp(np.linspace(*logLim, num=m+1))  # Logarithmically spaced output borders
    # Index of the input pixel containing each new border (clipped to range)
    k = (newBorders - lim[0]).clip(0, n-1).astype(int)

    specNew = np.add.reduceat(spec, k)[:-1]  # Do analytic integral
    specNew *= np.diff(k) > 0  # fix for design flaw of reduceat()
    # Add the fractional-pixel contribution at each output border
    specNew += np.diff((newBorders - borders[k])*spec[k])

    if not flux:
        # Convert the integral back to a mean flux per output pixel
        specNew /= np.diff(newBorders)

    # Output log(wavelength): log of geometric mean of the output borders
    logLam = np.log(np.sqrt(newBorders[1:]*newBorders[:-1])*dLam)

    return specNew, logLam, velscale
def gaussian_filter1d(spec, sig):
"""
Convolve a spectrum by a Gaussian with different sigma for every pixel.
If all sigma are the same this routine produces the same output as
scipy.ndimage.gaussian_filter1d, except for the border treatment.
Here the first/last p pixels are filled with zeros.
When creating a template library for SDSS data, this implementation
is 60x faster than a naive for loop over pixels.
:param spec: vector with the spectrum to convolve
:param sig: vector of sigma values (in pixels) for every pixel
:return: spec convolved with a Gaussian with dispersion sig
"""
sig = sig.clip(0.01) # forces zero sigmas to have 0.01 pixels
p = int(np.ceil(np.max(3*sig)))
m = 2*p + 1 # kernel size
x2 = np.linspace(-p, p, m)**2
n = spec.size
a = np.zeros((m, n))
for j in range(m): # Loop over the small size of the kernel
a[j, p:-p] = spec[j:n-m+j+1]
gau = np.exp(-x2[:, None]/(2*sig**2))
gau /= np.sum(gau, 0)[None, :] # Normalize kernel
conv_spectrum = np.sum(a*gau, 0)
return conv_spectrum
def calibrate(wave, flux, mag, filtername = 'SLOAN_SDSS.r'):
"""
Calibrate the spectra according to the magnitude.
:param wave: _description_
:type wave: _type_
:param flux: _description_
:type flux: _type_
:param mag: _description_
:type mag: _type_
:param filtername: _description_, defaults to './data/SLOAN_SDSS.r'
:type filtername: str, optional
:return: _description_
:rtype: _type_
"""
# Loading response curve
if filtername == '5100':
wave0 = np.linspace(3000,10000,7000)
response0 = np.zeros(7000)
response0[(wave0 > 5050) & (wave0 < 5150)] = 1.
else:
filter_file = data_path + '/data/filter/' + filtername+'.filter'
wave0, response0 = readcol(filter_file)
# Setting the response
func = interp1d(wave0, response0)
response = np.copy(wave)
ind_extra = (wave > max(wave0)) | (wave < min(wave0))
response[ind_extra] = 0
ind_inside = (wave < max(wave0)) & (wave > min(wave0))
response[ind_inside] = func(wave[ind_inside])
# Flux map of datacube for given filter band
preflux = np.sum(flux * response * np.mean(np.diff(wave))) / np.sum(response * np.mean(np.diff(wave)))
# Real flux from magnitude for given filter
Loading
Loading full blame…