import os
import glob
import sys
from os import path
import numpy as np
import astropy.units as u
from astropy.io import fits
from scipy.stats import norm
from scipy.interpolate import interp1d
# Root of the GEHONG reference data (e.g. filter response curves), taken
# from the GEHONG_DATA_PATH environment variable
data_path = os.getenv('GEHONG_DATA_PATH')


def readcol(filename, **kwargs):
    """
    Read all columns of an ASCII file. Taken from ppxf.

    Parameters
    ----------
    filename : string
        The name of the input ASCII file

    Returns
    -------
    list of arrays
        The values of each column
    """
    f = np.genfromtxt(filename, dtype=None, **kwargs)

    t = type(f[0])
    if t == np.ndarray or t == np.void:  # array or structured array
        f = map(np.array, zip(*f))

    # In Python 3.x all strings (e.g. name='NGC1023') are Unicode strings by default.
    # However genfromtxt() returns byte strings b'NGC1023' for non-numeric columns.
    # To have the same behaviour in Python 3 as in Python 2, I convert the NumPy
    # byte string 'S' type into Unicode strings, which behave like normal strings.
    # With this change I can read the string a='NGC1023' from a text file and the
    # test a == 'NGC1023' will give True as expected.
    if sys.version_info >= (3, 0):
        f = [v.astype(str) if v.dtype.char == 'S' else v for v in f]

    return f
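
# A minimal usage sketch of readcol(), reading a two-column ASCII filter file.
# 'example.filter' is a hypothetical file name, not shipped with this module:
# >>> wave0, response0 = readcol('example.filter')
# >>> wave0[:3], response0[:3]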


def log_rebin(lamRange, spec, oversample=False, velscale=None, flux=False):
    """
    Logarithmically rebin a spectrum, while rigorously conserving the flux.
    This function is taken from ppxf.

    Parameters
    ----------
    lamRange : array
        Two-element vector containing the central wavelength
        of the first and last pixels in the spectrum
    spec : array
        Input spectrum
    oversample : bool or int, optional
        Oversampling factor, so as not to lose spectral resolution,
        especially for extended wavelength ranges and to avoid aliasing;
        by default False (no oversampling)
    velscale : float, optional
        Velocity scale in km/s per pixel, by default None
    flux : bool, optional
        True to preserve total flux, by default False

    Returns
    -------
    specNew : array
        Output spectrum
    logLam : array
        Natural logarithm of the output wavelength array
    velscale : float or array
        Velocity scale in km/s per pixel
    """
    lamRange = np.asarray(lamRange)
    assert len(lamRange) == 2, 'lamRange must contain two elements'
    assert lamRange[0] < lamRange[1], 'It must be lamRange[0] < lamRange[1]'
    s = spec.shape
    assert len(s) == 1, 'input spectrum must be a vector'
    n = s[0]
    if oversample:
        m = int(n*oversample)
    else:
        m = int(n)

    dLam = np.diff(lamRange)/(n - 1.)        # Assume constant dLam
    lim = lamRange/dLam + [-0.5, 0.5]        # All in units of dLam
    borders = np.linspace(*lim, num=n+1)     # Linearly spaced pixel borders
    logLim = np.log(lim)

    c = 299792.458                           # Speed of light in km/s
    if velscale is None:                     # Velocity scale not set by user
        velscale = np.diff(logLim)/m*c       # Only for output
    else:                                    # Velocity scale set by user
        logScale = velscale/c
        m = int(np.diff(logLim)/logScale)    # Number of output pixels
        logLim[1] = logLim[0] + m*logScale

    newBorders = np.exp(np.linspace(*logLim, num=m+1))  # Logarithmically spaced borders
    k = (newBorders - lim[0]).clip(0, n-1).astype(int)

    specNew = np.add.reduceat(spec, k)[:-1]  # Do analytic integral
    specNew *= np.diff(k) > 0                # fix for design flaw of reduceat()
    specNew += np.diff((newBorders - borders[k])*spec[k])

    if not flux:
        specNew /= np.diff(newBorders)

    # Output log(wavelength): log of geometric mean
    logLam = np.log(np.sqrt(newBorders[1:]*newBorders[:-1])*dLam)

    return specNew, logLam, velscale
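
# A minimal usage sketch of log_rebin(), with made-up illustrative values:
# log-rebin a flat spectrum sampled linearly between 4000 and 7000 Angstrom.
# >>> wave = np.linspace(4000., 7000., 3001)
# >>> specNew, logLam, velscale = log_rebin([wave[0], wave[-1]], np.ones(3001))
# >>> np.exp(logLam[:2])   # wavelengths (Angstrom) of the first output pixels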


def gaussian_filter1d(spec, sig):
    """
    One-dimensional Gaussian convolution, with a possibly different
    sigma for every pixel.

    Parameters
    ----------
    spec : float array
        Vector with the spectrum to convolve
    sig : float array
        Vector of sigma values (in pixels), one for every pixel

    Returns
    -------
    float array
        Spectrum after convolution
    """
    sig = sig.clip(0.01)  # forces zero sigmas to have 0.01 pixels
    p = int(np.ceil(np.max(3*sig)))
    m = 2*p + 1  # kernel size
    x2 = np.linspace(-p, p, m)**2

    n = spec.size
    a = np.zeros((m, n))
    for j in range(m):  # Loop over the small size of the kernel
        a[j, p:-p] = spec[j:n-m+j+1]

    gau = np.exp(-x2[:, None]/(2*sig**2))
    gau /= np.sum(gau, 0)[None, :]  # Normalize kernel

    conv_spectrum = np.sum(a*gau, 0)

    return conv_spectrum
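
# A minimal usage sketch of gaussian_filter1d(), with made-up illustrative
# values: broaden a spectrum with a sigma growing linearly from 1 to 3 pixels.
# >>> spec = np.ones(1000)
# >>> sig = np.linspace(1., 3., 1000)
# >>> spec_broad = gaussian_filter1d(spec, sig)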


def calibrate(wave, flux, mag, filtername='SLOAN_SDSS.r'):
    """
    Flux calibration of a spectrum, scaling it to match a given magnitude.

    Parameters
    ----------
    wave : float array
        Wavelength of the input spectrum
    flux : float array
        Flux of the input spectrum
    mag : float
        Magnitude (ST system) used for the flux calibration
    filtername : str, optional
        Filter band name, by default 'SLOAN_SDSS.r'

    Returns
    -------
    float array
        Spectrum after flux calibration, in units of 1e-17 erg/s/cm^2/A
    """
    # Loading the response curve
    if filtername == '5100':
        # Pseudo-filter: a 100 A wide box centred on 5100 A
        wave0 = np.linspace(3000, 10000, 7000)
        response0 = np.zeros(7000)
        response0[(wave0 > 5050) & (wave0 < 5150)] = 1.
    else:
        filter_file = data_path + '/data/filter/' + filtername + '.filter'
        wave0, response0 = readcol(filter_file)

    # Interpolating the response curve onto the wavelength grid of the
    # spectrum; the response is zero outside the coverage of the filter
    func = interp1d(wave0, response0)
    response = np.zeros_like(wave)
    ind_inside = (wave >= min(wave0)) & (wave <= max(wave0))
    response[ind_inside] = func(wave[ind_inside])

    # Response-weighted mean flux of the spectrum in the given filter band
    preflux = np.sum(flux * response) / np.sum(response)

    # Real flux from the magnitude for the given filter
    realflux = (mag * u.STmag).to(u.erg/u.s/u.cm**2/u.AA).value

    # Normalization
    flux_ratio = realflux / preflux
    flux_calibrate = flux * flux_ratio * 1e17  # Units: 10^-17 erg/s/cm^2/A

    return flux_calibrate
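
# A minimal usage sketch of calibrate(), with made-up illustrative values.
# It assumes the GEHONG_DATA_PATH environment variable points at the data
# directory containing data/filter/SLOAN_SDSS.r.filter:
# >>> wave = np.linspace(3600., 9000., 5401)
# >>> flux = np.full(5401, 1e-16)   # erg/s/cm^2/A
# >>> flux_cal = calibrate(wave, flux, 18.0, filtername='SLOAN_SDSS.r')
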
# ----------------
# Reddening Module

def Calzetti_Law(wave, Rv=4.05):
    """
    Dust extinction curve of Calzetti et al. (2000)