Commit 36189a3e authored by JX

Merge remote-tracking branch 'origin/develop'

parents dd26d370 27646bc4
Pipeline #4509 passed with stage in 0 seconds
......@@ -97,7 +97,6 @@ class SpecDisperser(object):
self.grating_conf_file = conf
self.ignoreBeam = ignoreBeam
def compute_spec_orders(self):
all_orders = OrderedDict()
......@@ -121,16 +120,18 @@ class SpecDisperser(object):
ytrace_beam, lam_beam = self.grating_conf.get_beam_trace(x=self.xcenter, y=self.ycenter, dx=(dx + xoff),
beam=beam)
### Account for pixel centering of the trace
# Account for pixel centering of the trace
yfrac_beam = ytrace_beam - floor(ytrace_beam+0.5)
ysens = lam_beam * 0
lam_index = argsort(lam_beam)
conf_sens = self.grating_conf.sens[beam]
lam_intep = np.linspace(self.band_start, self.band_end, int((self.band_end - self.band_start) / 0.1))
lam_intep = np.linspace(self.band_start, self.band_end, int(
(self.band_end - self.band_start) / 0.1))
thri = interpolate.interp1d(conf_sens['WAVELENGTH'], conf_sens['SENSITIVITY'])
thri = interpolate.interp1d(
conf_sens['WAVELENGTH'], conf_sens['SENSITIVITY'])
spci = interpolate.interp1d(self.spec['WAVELENGTH'], self.spec['FLUX'])
beam_thr = thri(lam_intep)
......@@ -138,7 +139,7 @@ class SpecDisperser(object):
bean_thr_spec = beam_thr * spec_sample
###generate sensitivity file for aXe
# generate sensitivity file for aXe
# ysensitivity = lam_beam * 0
#
# ysensitivity[lam_index] = interp.interp_conserve_c(lam_beam[lam_index], lam_intep,
......@@ -155,7 +156,8 @@ class SpecDisperser(object):
sensitivity_beam = ysens
len_spec_x = len(dx)
len_spec_y = int(abs(ceil(ytrace_beam[-1]) - floor(ytrace_beam[0])) + 1)
len_spec_y = int(
abs(ceil(ytrace_beam[-1]) - floor(ytrace_beam[0])) + 1)
beam_sh = (self.img_sh[0] + len_spec_y, self.img_sh[1] + len_spec_x)
modelf = zeros(product(beam_sh), dtype=float)
......@@ -169,7 +171,7 @@ class SpecDisperser(object):
dypix = cast[int](np.floor(ytrace_beam - dyc[0] + x0[0] + 0.5))
frac_ids = yfrac_beam<0
frac_ids = yfrac_beam < 0
dypix[frac_ids] = dypix[frac_ids] - 1
yfrac_beam[frac_ids] = 1+yfrac_beam[frac_ids]
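The pixel-centering step above produces, for every dispersed column, the nearest detector row and a fractional offset in [0, 1) used to split flux between adjacent rows. A minimal standalone sketch of that bookkeeping (ignoring the dyc/x0 origin offsets applied in the real code; the trace values are made up):

import numpy as np

ytrace = np.array([10.2, 10.6, 11.49, -3.7])   # hypothetical trace positions
yfrac = ytrace - np.floor(ytrace + 0.5)        # signed fraction in [-0.5, 0.5)
dypix = np.floor(ytrace + 0.5).astype(int)     # nearest row
neg = yfrac < 0
dypix[neg] -= 1                                # shift down one row ...
yfrac[neg] += 1.0                              # ... and fold the fraction into [0, 1)
print(dypix, yfrac)                            # [10 10 11 -4] and fractions ~[0.2 0.6 0.49 0.3]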
......@@ -199,7 +201,8 @@ class SpecDisperser(object):
else:
beam_flat = zeros([len(modelf), len(self.flat_cube)])
sub_flat_cube = zeros([len(self.flat_cube),beam_sh[0], beam_sh[1]])
sub_flat_cube = zeros(
[len(self.flat_cube), beam_sh[0], beam_sh[1]])
sub_flat_cube[0] = sub_flat_cube[0] + 1.
overlap_flag = 1
......@@ -210,23 +213,28 @@ class SpecDisperser(object):
sub_x_e = originOut_x + beam_sh[1] - 1
beam_x_s = max(sub_x_s, 0)
if beam_x_s > self.flat_cube[0].shape[1] - 1: overlap_flag = 0
if beam_x_s > self.flat_cube[0].shape[1] - 1:
overlap_flag = 0
if overlap_flag == 1:
beam_x_e = min(sub_x_e, self.flat_cube[0].shape[1] - 1)
if beam_x_e < 0: overlap_flag = 0
if beam_x_e < 0:
overlap_flag = 0
if overlap_flag == 1:
beam_y_s = max(sub_y_s, 0)
if beam_y_s > self.flat_cube[0].shape[0] - 1: overlap_flag = 0
if beam_y_s > self.flat_cube[0].shape[0] - 1:
overlap_flag = 0
if overlap_flag == 1:
beam_y_e = min(sub_y_e, self.flat_cube[0].shape[0] - 1)
if beam_y_e < 0: overlap_flag = 0
if beam_y_e < 0:
overlap_flag = 0
if overlap_flag == 1:
sub_flat_cube[:,beam_y_s-originOut_y:beam_y_e-originOut_y+1,beam_x_s-originOut_x:beam_x_e-originOut_x+1] = self.flat_cube[:,beam_y_s:beam_y_e+1,beam_x_s:beam_x_e+1]
sub_flat_cube[:, beam_y_s-originOut_y:beam_y_e-originOut_y+1, beam_x_s-originOut_x:beam_x_e -
originOut_x+1] = self.flat_cube[:, beam_y_s:beam_y_e+1, beam_x_s:beam_x_e+1]
for i in arange(0, len(self.flat_cube), 1):
beam_flat[:,i] = sub_flat_cube[i].flatten()
beam_flat[:, i] = sub_flat_cube[i].flatten()
# beam_flat = zeros([len(modelf), len(self.flat_cube)])
# flat_sh = self.flat_cube[0].shape
# for i in arange(0, beam_sh[0], 1):
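The overlap_flag cascade above simply clips the beam footprint to the flat-field cube before copying it into the padded sub-array. The net effect, reduced to a standalone NumPy sketch with hypothetical shapes and origins:

import numpy as np

flat_cube = np.ones((2, 100, 100))         # hypothetical cube: (n_terms, ny_chip, nx_chip)
beam_sh = (40, 60)                         # padded beam shape (ny, nx)
origin_y, origin_x = 80, -10               # beam origin on the chip, possibly off the edge

sub = np.zeros((len(flat_cube), beam_sh[0], beam_sh[1]))
sub[0] += 1.0                              # zeroth-order term defaults to unity

y_s = max(origin_y, 0)
y_e = min(origin_y + beam_sh[0] - 1, flat_cube.shape[1] - 1)
x_s = max(origin_x, 0)
x_e = min(origin_x + beam_sh[1] - 1, flat_cube.shape[2] - 1)
if y_s <= y_e and x_s <= x_e:              # beam overlaps the chip at all
    sub[:, y_s - origin_y:y_e - origin_y + 1,
        x_s - origin_x:x_e - origin_x + 1] = flat_cube[:, y_s:y_e + 1, x_s:x_e + 1]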
......@@ -243,7 +251,8 @@ class SpecDisperser(object):
flat_index[nonz], yfrac_beam[nonz],
sensitivity_beam[nonz],
modelf, x0,
array(self.img_sh, dtype=int64),
array(self.img_sh,
dtype=int64),
array(beam_sh, dtype=int64),
beam_flat,
lam_beam[lam_index][nonz])
......@@ -254,13 +263,15 @@ class SpecDisperser(object):
if self.isAlongY == 1:
model, _, _ = rotate90(array_orig=model, isClockwise=0)
return model, originOut_x, originOut_y, dxpix, dypix, lam_beam,ysens
return model, originOut_x, originOut_y, dxpix, dypix, lam_beam, ysens
def writerSensitivityFile(self, conffile='', beam='', w=None, sens=None):
orders = {'A': '1st', 'B': '0st', 'C': '2st', 'D': '-1st', 'E': '-2st'}
sens_file_name = conffile[0:-5] + '_sensitivity_' + orders[beam] + '.fits'
sens_file_name = conffile[0:-5] + \
'_sensitivity_' + orders[beam] + '.fits'
if not os.path.exists(sens_file_name) == True:
senstivity_out = Table(array([w, sens]).T, names=('WAVELENGTH', 'SENSITIVITY'))
senstivity_out = Table(
array([w, sens]).T, names=('WAVELENGTH', 'SENSITIVITY'))
senstivity_out.write(sens_file_name, format='fits')
......@@ -284,7 +295,7 @@ class aXeConf():
self.conf_file = conf_file
self.count_beam_orders()
## Global XOFF/YOFF offsets
# Global XOFF/YOFF offsets
if 'XOFF' in self.conf.keys():
self.xoff = np.float(self.conf['XOFF'])
else:
......@@ -310,11 +321,11 @@ class aXeConf():
conf = OrderedDict()
lines = open(conf_file).readlines()
for line in lines:
## empty / commented lines
# empty / commented lines
if (line.startswith('#')) | (line.strip() == '') | ('"' in line):
continue
## split the line, taking out ; and # comments
# split the line, taking out ; and # comments
spl = line.split(';')[0].split('#')[0].split()
param = spl[0]
if len(spl) > 2:
......@@ -360,13 +371,14 @@ class aXeConf():
self.beams.append(beam)
self.dxlam[beam] = np.arange(self.conf['BEAM{0}'.format(beam)].min(),
self.conf['BEAM{0}'.format(beam)].max(), dtype=int)
self.nx[beam] = int(self.dxlam[beam].max() - self.dxlam[beam].min()) + 1
self.nx[beam] = int(self.dxlam[beam].max() -
self.dxlam[beam].min()) + 1
self.sens[beam] = Table.read(
'{0}/{1}'.format(os.path.dirname(self.conf_file), self.conf['SENSITIVITY_{0}'.format(beam)]))
# self.sens[beam].wave = np.cast[np.double](self.sens[beam]['WAVELENGTH'])
# self.sens[beam].sens = np.cast[np.double](self.sens[beam]['SENSITIVITY'])
### Need doubles for interpolating functions
# Need doubles for interpolating functions
for col in self.sens[beam].colnames:
data = np.cast[np.double](self.sens[beam][col])
self.sens[beam].remove_column(col)
......@@ -394,22 +406,22 @@ class aXeConf():
Evaluated field-dependent coefficients
"""
## number of coefficients for a given polynomial order
## 1:1, 2:3, 3:6, 4:10, order:order*(order+1)/2
# number of coefficients for a given polynomial order
# 1:1, 2:3, 3:6, 4:10, order:order*(order+1)/2
if isinstance(coeffs, float):
order = 1
else:
order = int(-1 + np.sqrt(1 + 8 * len(coeffs))) // 2
## Build polynomial terms array
## $a = a_0+a_1x_i+a_2y_i+a_3x_i^2+a_4x_iy_i+a_5yi^2+$ ...
# Build polynomial terms array
# $a = a_0+a_1x_i+a_2y_i+a_3x_i^2+a_4x_iy_i+a_5yi^2+$ ...
xy = []
for p in range(order):
for px in range(p + 1):
# print 'x**%d y**%d' %(p-px, px)
xy.append(xi ** (p - px) * yi ** (px))
## Evaluate the polynomial, allowing for N-dimensional inputs
# Evaluate the polynomial, allowing for N-dimensional inputs
a = np.sum((np.array(xy).T * coeffs).T, axis=0)
return a
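field_dependent therefore evaluates a 2D polynomial of the reference position (xi, yi). A self-contained sketch with made-up coefficients (not taken from any real aXe configuration):

import numpy as np

def field_dependent_sketch(xi, yi, coeffs):
    # order*(order+1)/2 coefficients describe a 2D polynomial in (xi, yi)
    coeffs = np.atleast_1d(coeffs)
    order = int(-1 + np.sqrt(1 + 8 * len(coeffs))) // 2
    terms = [xi ** (p - px) * yi ** px for p in range(order) for px in range(p + 1)]
    return np.sum((np.array(terms).T * coeffs).T, axis=0)

# hypothetical coefficient row: a0 + a1*xi + a2*yi
print(field_dependent_sketch(100.0, 200.0, np.array([24.0, 1.0e-3, -2.0e-3])))   # 23.7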
......@@ -445,26 +457,27 @@ class aXeConf():
.. math:: dp = (u \sqrt{1+u^2} + \mathrm{arcsinh}\ u) / (4\cdot \mathrm{DYDX}[2])
"""
## dp is the arc length along the trace
## $\lambda = dldp_0 + dldp_1 dp + dldp_2 dp^2$ ...
# dp is the arc length along the trace
# $\lambda = dldp_0 + dldp_1 dp + dldp_2 dp^2$ ...
poly_order = len(dydx) - 1
if (poly_order == 2):
if np.abs(np.unique(dydx[2])).max() == 0:
poly_order = 1
if poly_order == 0: ## dy=0
if poly_order == 0: # dy=0
dp = dx
elif poly_order == 1: ## constant dy/dx
elif poly_order == 1: # constant dy/dx
dp = np.sqrt(1 + dydx[1] ** 2) * (dx)
elif poly_order == 2: ## quadratic trace
elif poly_order == 2: # quadratic trace
u0 = dydx[1] + 2 * dydx[2] * (0)
dp0 = (u0 * np.sqrt(1 + u0 ** 2) + np.arcsinh(u0)) / (4 * dydx[2])
u = dydx[1] + 2 * dydx[2] * (dx)
dp = (u * np.sqrt(1 + u ** 2) + np.arcsinh(u)) / (4 * dydx[2]) - dp0
dp = (u * np.sqrt(1 + u ** 2) + np.arcsinh(u)) / \
(4 * dydx[2]) - dp0
else:
## high order shape, numerical integration along trace
## (this can be slow)
# high order shape, numerical integration along trace
# (this can be slow)
xmin = np.minimum((dx).min(), 0)
xmax = np.maximum((dx).max(), 0)
xfull = np.arange(xmin, xmax)
......@@ -472,11 +485,12 @@ class aXeConf():
for i in range(1, poly_order):
dyfull += i * dydx[i] * (xfull - 0.5) ** (i - 1)
## Integrate from 0 to dx / -dx
# Integrate from 0 to dx / -dx
dpfull = xfull * 0.
lt0 = xfull < 0
if lt0.sum() > 1:
dpfull[lt0] = np.cumsum(np.sqrt(1 + dyfull[lt0][::-1] ** 2))[::-1]
dpfull[lt0] = np.cumsum(
np.sqrt(1 + dyfull[lt0][::-1] ** 2))[::-1]
dpfull[lt0] *= -1
#
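The closed form used above for the quadratic trace is the standard arc-length integral of sqrt(1 + (dy/dx)^2); it can be checked against direct numerical integration (arbitrary DYDX coefficients, assuming NumPy):

import numpy as np

dydx = [0.0, 0.01, 1.0e-4]       # arbitrary quadratic trace: y(x) = dydx[1]*x + dydx[2]*x**2
dx = 500.0

u0 = dydx[1]
dp0 = (u0 * np.sqrt(1 + u0 ** 2) + np.arcsinh(u0)) / (4 * dydx[2])
u = dydx[1] + 2 * dydx[2] * dx
dp = (u * np.sqrt(1 + u ** 2) + np.arcsinh(u)) / (4 * dydx[2]) - dp0

x = np.linspace(0.0, dx, 100001)
slope = dydx[1] + 2 * dydx[2] * x
print(dp, np.trapz(np.sqrt(1 + slope ** 2), x))   # the two estimates agree closely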
......@@ -520,10 +534,12 @@ class aXeConf():
NORDER = self.orders[beam] + 1
xi, yi = x - self.xoff, y - self.yoff
xoff_beam = self.field_dependent(xi, yi, self.conf['XOFF_{0}'.format(beam)])
yoff_beam = self.field_dependent(xi, yi, self.conf['YOFF_{0}'.format(beam)])
xoff_beam = self.field_dependent(
xi, yi, self.conf['XOFF_{0}'.format(beam)])
yoff_beam = self.field_dependent(
xi, yi, self.conf['YOFF_{0}'.format(beam)])
## y offset of trace (DYDX)
# y offset of trace (DYDX)
dydx = np.zeros(NORDER) # 0 #+1.e-80
dydx = [0] * NORDER
......@@ -538,7 +554,7 @@ class aXeConf():
for i in range(NORDER):
dy += dydx[i] * (dx - xoff_beam) ** i
## wavelength solution
# wavelength solution
dldp = np.zeros(NORDER)
dldp = [0] * NORDER
......@@ -556,7 +572,7 @@ class aXeConf():
# ## dp is the arc length along the trace
# ## $\lambda = dldp_0 + dldp_1 dp + dldp_2 dp^2$ ...
# if self.conf['DYDX_ORDER_%s' %(beam)] == 0: ## dy=0
# dp = dx-xoff_beam
# dp = dx-xoff_beam
# elif self.conf['DYDX_ORDER_%s' %(beam)] == 1: ## constant dy/dx
# dp = np.sqrt(1+dydx[1]**2)*(dx-xoff_beam)
# elif self.conf['DYDX_ORDER_%s' %(beam)] == 2: ## quadratic trace
......@@ -573,7 +589,7 @@ class aXeConf():
# dyfull = 0
# for i in range(1, NORDER):
# dyfull += i*dydx[i]*(xfull-0.5)**(i-1)
#
#
# ## Integrate from 0 to dx / -dx
# dpfull = xfull*0.
# lt0 = xfull <= 0
......@@ -584,10 +600,10 @@ class aXeConf():
# gt0 = xfull >= 0
# if gt0.sum() > 0:
# dpfull[gt0] = np.cumsum(np.sqrt(1+dyfull[gt0]**2))
#
#
# dp = np.interp(dx-xoff_beam, xfull, dpfull)
## Evaluate dldp
# Evaluate dldp
lam = dp * 0.
for i in range(NORDER):
lam += dldp[i] * dp ** i
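With dp in hand, the wavelength solution is just this polynomial in arc length; for example, with hypothetical first-order DLDP coefficients (not from a real configuration file):

import numpy as np

dldp = [6200.0, 9.75]                 # hypothetical: lambda = dldp[0] + dldp[1]*dp (Angstrom)
dp = np.arange(0.0, 100.0, 10.0)      # arc length along the trace, in pixels
lam = dp * 0.
for i in range(len(dldp)):
    lam += dldp[i] * dp ** i
print(lam)                            # 6200.0, 6297.5, 6395.0, ...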
......@@ -619,7 +635,8 @@ class aXeConf():
if 'XOFF_{0}'.format(beam) not in self.conf.keys():
continue
xoff = self.field_dependent(x0, x1, self.conf['XOFF_{0}'.format(beam)])
xoff = self.field_dependent(
x0, x1, self.conf['XOFF_{0}'.format(beam)])
dy, lam = self.get_beam_trace(x0, x1, dx=dx, beam=beam)
xlim = self.conf['BEAM{0}'.format(beam)]
ok = (dx >= xlim[0]) & (dx <= xlim[1])
......@@ -627,7 +644,8 @@ class aXeConf():
alpha=0.5, edgecolor='None')
plt.text(np.median(dx[ok]), np.median(dy[ok]) + 1, beam,
ha='center', va='center', fontsize=14)
print('Beam {0}, lambda=({1:.1f} - {2:.1f})'.format(beam, lam[ok].min(), lam[ok].max()))
print('Beam {0}, lambda=({1:.1f} - {2:.1f})'.format(beam,
lam[ok].min(), lam[ok].max()))
plt.grid()
plt.xlabel(r'$\Delta x$')
......@@ -650,7 +668,7 @@ class aXeConf():
# Returns
# -------
# conf : `~grizli.grismconf.aXeConf`
# Configuration file object. Runs `conf.get_beams()` to read the
# Configuration file object. Runs `conf.get_beams()` to read the
# sensitivity curves.
# """
# conf = aXeConf(conf_file)
......
import os, sys
import os
import sys
import numpy as np
import galsim
import astropy.constants as cons
from astropy.table import Table
from scipy import interpolate
from ObservationSim.MockObject.MockObject import MockObject
from ObservationSim.MockObject.SpecDisperser import SpecDisperser
from ObservationSim.MockObject._util import eObs, integrate_sed_bandpass, getNormFactorForSpecWithABMAG, getObservedSED, getABMAG,convolveGaussXorders
from observation_sim.mock_objects.MockObject import MockObject
from observation_sim.mock_objects.SpecDisperser import SpecDisperser
from observation_sim.mock_objects._util import eObs, integrate_sed_bandpass, getNormFactorForSpecWithABMAG, getObservedSED, getABMAG, convolveGaussXorders
class Stamp(MockObject):
def __init__(self, param, logger=None):
......@@ -22,17 +24,17 @@ class Stamp(MockObject):
if nphotons_tot == None:
nphotons_tot = self.getElectronFluxFilt(filt, tel, exptime)
try:
full = integrate_sed_bandpass(sed=self.sed, bandpass=filt.bandpass_full)
full = integrate_sed_bandpass(
sed=self.sed, bandpass=filt.bandpass_full)
except Exception as e:
print(e)
self.logger.error(e)
return False
#nphotons_sum = 0
#photons_list = []
#xmax, ymax = 0, 0
# nphotons_sum = 0
# photons_list = []
# xmax, ymax = 0, 0
if self.getMagFilter(filt) <= 15:
folding_threshold = 5.e-4
......@@ -72,17 +74,18 @@ class Stamp(MockObject):
nphotons = ratio * nphotons_tot
else:
continue
#nphotons_sum += nphotons
# nphotons_sum += nphotons
psf, pos_shear = psf_model.get_PSF(chip=chip, pos_img=pos_img, bandpass=bandpass, folding_threshold=folding_threshold)
psf, pos_shear = psf_model.get_PSF(
chip=chip, pos_img=pos_img, bandpass=bandpass, folding_threshold=folding_threshold)
_gal = self.param['image']
galImg= galsim.ImageF(_gal, scale=self.param['pixScale'])
gal_temp= galsim.InterpolatedImage(galImg)
gal_temp= gal_temp.shear(gal_shear)
gal_temp= gal_temp.withFlux(nphotons)
_gal = self.param['image']
galImg = galsim.ImageF(_gal, scale=self.param['pixScale'])
gal_temp = galsim.InterpolatedImage(galImg)
gal_temp = gal_temp.shear(gal_shear)
gal_temp = gal_temp.withFlux(nphotons)
gal_temp= galsim.Convolve(psf, gal_temp)
gal_temp = galsim.Convolve(psf, gal_temp)
if i == 0:
gal = gal_temp
......@@ -95,7 +98,8 @@ class Stamp(MockObject):
return 2, pos_shear
stamp.setCenter(x_nominal, y_nominal)
bounds = stamp.bounds & galsim.BoundsI(0, chip.npix_x - 1, 0, chip.npix_y - 1)
bounds = stamp.bounds & galsim.BoundsI(
0, chip.npix_x - 1, 0, chip.npix_y - 1)
if bounds.area() > 0:
chip.img.setOrigin(0, 0)
......@@ -105,22 +109,22 @@ class Stamp(MockObject):
del stamp
if is_updated == 0:
print("fits obj %s missed"%(self.id))
print("fits obj %s missed" % (self.id))
if self.logger:
self.logger.info("fits obj %s missed"%(self.id))
self.logger.info("fits obj %s missed" % (self.id))
return 0, pos_shear
return 1, pos_shear
def drawObj_slitless(self, tel, pos_img, psf_model, bandpass_list, filt, chip, nphotons_tot=None, g1=0, g2=0,
exptime=150., normFilter=None, grating_split_pos=3685, fd_shear=None):
if normFilter is not None:
norm_thr_rang_ids = normFilter['SENSITIVITY'] > 0.001
sedNormFactor = getNormFactorForSpecWithABMAG(ABMag=self.param['mag_use_normal'], spectrum=self.sed,
norm_thr=normFilter,
sWave=np.floor(normFilter[norm_thr_rang_ids][0][0]),
eWave=np.ceil(normFilter[norm_thr_rang_ids][-1][0]))
norm_thr=normFilter,
sWave=np.floor(
normFilter[norm_thr_rang_ids][0][0]),
eWave=np.ceil(normFilter[norm_thr_rang_ids][-1][0]))
if sedNormFactor == 0:
return 2, None
else:
......@@ -140,7 +144,6 @@ class Stamp(MockObject):
chip_wcs_local = self.chip_wcs.local(self.real_pos)
if self.getMagFilter(filt) <= 15:
folding_threshold = 5.e-4
else:
......@@ -150,7 +153,8 @@ class Stamp(MockObject):
flat_cube = chip.flat_cube
xOrderSigPlus = {'A':1.3909419820029296,'B':1.4760376591236062,'C':4.035447379743442,'D':5.5684364343742825,'E':16.260021029735388}
xOrderSigPlus = {'A': 1.3909419820029296, 'B': 1.4760376591236062,
'C': 4.035447379743442, 'D': 5.5684364343742825, 'E': 16.260021029735388}
grating_split_pos_chip = 0 + grating_split_pos
branges = np.zeros([len(bandpass_list), 2])
......@@ -174,9 +178,9 @@ class Stamp(MockObject):
# psf, pos_shear = psf_model.get_PSF(chip=chip, pos_img=pos_img, bandpass=bandpass, folding_threshold=folding_threshold)
_gal = self.param['image']
galImg= galsim.ImageF(_gal, scale=self.param['pixScale'])
gal = galsim.InterpolatedImage(galImg)
_gal = self.param['image']
galImg = galsim.ImageF(_gal, scale=self.param['pixScale'])
gal = galsim.InterpolatedImage(galImg)
# (TEST) Random knots
# knots = galsim.RandomKnots(npoints=100, profile=disk)
......@@ -196,40 +200,43 @@ class Stamp(MockObject):
# # if fd_shear is not None:
# # gal = gal.shear(fd_shear)
starImg = gal.drawImage(wcs=chip_wcs_local, offset=offset,method = 'real_space')
starImg = gal.drawImage(
wcs=chip_wcs_local, offset=offset, method='real_space')
origin_star = [y_nominal - (starImg.center.y - starImg.ymin),
x_nominal - (starImg.center.x - starImg.xmin)]
starImg.setOrigin(0, 0)
gal_origin = [origin_star[0], origin_star[1]]
gal_end = [origin_star[0] + starImg.array.shape[0] - 1, origin_star[1] + starImg.array.shape[1] - 1]
gal_end = [origin_star[0] + starImg.array.shape[0] -
1, origin_star[1] + starImg.array.shape[1] - 1]
if gal_origin[1] < grating_split_pos_chip < gal_end[1]:
subSlitPos = int(grating_split_pos_chip - gal_origin[1] + 1)
## part img disperse
# part img disperse
subImg_p1 = starImg.array[:, 0:subSlitPos]
star_p1 = galsim.Image(subImg_p1)
star_p1.setOrigin(0, 0)
origin_p1 = origin_star
xcenter_p1 = min(x_nominal,grating_split_pos_chip-1) - 0
xcenter_p1 = min(x_nominal, grating_split_pos_chip-1) - 0
ycenter_p1 = y_nominal-0
sdp_p1 = SpecDisperser(orig_img=star_p1, xcenter=xcenter_p1,
ycenter=ycenter_p1, origin=origin_p1,
tar_spec=normalSED,
band_start=brange[0], band_end=brange[1],
conf=chip.sls_conf[0],
isAlongY=0,
flat_cube=flat_cube)
ycenter=ycenter_p1, origin=origin_p1,
tar_spec=normalSED,
band_start=brange[0], band_end=brange[1],
conf=chip.sls_conf[0],
isAlongY=0,
flat_cube=flat_cube)
# self.addSLStoChipImage(sdp=sdp_p1, chip=chip, xOrderSigPlus = xOrderSigPlus, local_wcs=chip_wcs_local)
pos_shear = self.addSLStoChipImageWithPSF(sdp=sdp_p1, chip=chip, pos_img_local=[xcenter_p1, ycenter_p1],
psf_model=psf_model, bandNo=i + 1,
grating_split_pos=grating_split_pos,
local_wcs=chip_wcs_local, pos_img = pos_img)
local_wcs=chip_wcs_local, pos_img=pos_img)
subImg_p2 = starImg.array[:, subSlitPos+1:starImg.array.shape[1]]
subImg_p2 = starImg.array[:,
subSlitPos+1:starImg.array.shape[1]]
star_p2 = galsim.Image(subImg_p2)
star_p2.setOrigin(0, 0)
origin_p2 = [origin_star[0], grating_split_pos_chip]
......@@ -248,11 +255,11 @@ class Stamp(MockObject):
pos_shear = self.addSLStoChipImageWithPSF(sdp=sdp_p2, chip=chip, pos_img_local=[xcenter_p2, ycenter_p2],
psf_model=psf_model, bandNo=i + 1,
grating_split_pos=grating_split_pos,
local_wcs=chip_wcs_local, pos_img = pos_img)
local_wcs=chip_wcs_local, pos_img=pos_img)
del sdp_p1
del sdp_p2
elif grating_split_pos_chip<=gal_origin[1]:
elif grating_split_pos_chip <= gal_origin[1]:
sdp = SpecDisperser(orig_img=starImg, xcenter=x_nominal - 0,
ycenter=y_nominal - 0, origin=origin_star,
tar_spec=normalSED,
......@@ -264,9 +271,9 @@ class Stamp(MockObject):
pos_shear = self.addSLStoChipImageWithPSF(sdp=sdp, chip=chip, pos_img_local=[x_nominal, y_nominal],
psf_model=psf_model, bandNo=i + 1,
grating_split_pos=grating_split_pos,
local_wcs=chip_wcs_local, pos_img = pos_img)
local_wcs=chip_wcs_local, pos_img=pos_img)
del sdp
elif grating_split_pos_chip>=gal_end[1]:
elif grating_split_pos_chip >= gal_end[1]:
sdp = SpecDisperser(orig_img=starImg, xcenter=x_nominal - 0,
ycenter=y_nominal - 0, origin=origin_star,
tar_spec=normalSED,
......@@ -278,7 +285,7 @@ class Stamp(MockObject):
pos_shear = self.addSLStoChipImageWithPSF(sdp=sdp, chip=chip, pos_img_local=[x_nominal, y_nominal],
psf_model=psf_model, bandNo=i + 1,
grating_split_pos=grating_split_pos,
local_wcs=chip_wcs_local, pos_img = pos_img)
local_wcs=chip_wcs_local, pos_img=pos_img)
del sdp
# print(self.y_nominal, starImg.center.y, starImg.ymin)
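The branch structure above splits the drawn stamp at the grating split position so that the two column ranges can be dispersed with different grating configurations (the second configuration falls outside the hunks shown). The slicing itself, as a standalone sketch with a hypothetical stamp and origin:

import numpy as np

grating_split_pos_chip = 3685
origin_x = 3600                        # hypothetical x origin of the stamp on the chip
stamp = np.ones((40, 200))             # hypothetical drawn image (ny, nx)

gal_end_x = origin_x + stamp.shape[1] - 1
if origin_x < grating_split_pos_chip < gal_end_x:
    subSlitPos = int(grating_split_pos_chip - origin_x + 1)
    part1 = stamp[:, 0:subSlitPos]     # dispersed with the first configuration (sls_conf[0] above)
    part2 = stamp[:, subSlitPos + 1:]  # dispersed with the second configuration (not shown here)
    print(part1.shape, part2.shape)    # (40, 86) (40, 113)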
......
......@@ -6,8 +6,8 @@ import astropy.constants as cons
from astropy.table import Table
from scipy import interpolate
from ObservationSim.MockObject._util import integrate_sed_bandpass, getNormFactorForSpecWithABMAG, getObservedSED, getABMAG, tag_sed
from ObservationSim.MockObject.MockObject import MockObject
from observation_sim.mock_objects._util import integrate_sed_bandpass, getNormFactorForSpecWithABMAG, getObservedSED, getABMAG, tag_sed
from observation_sim.mock_objects.MockObject import MockObject
class Star(MockObject):
......
......@@ -5,5 +5,3 @@ from .Quasar import Quasar
from .Star import Star
from .Stamp import Stamp
from .FlatLED import FlatLED
# from .SkybackgroundMap import *
# from .CosmicRay import CosmicRay
......@@ -7,16 +7,19 @@ import galsim
VC_A = 2.99792458e+18 # speed of light: A/s
VC_M = 2.99792458e+8 # speed of light: m/s
H_PLANK = 6.626196e-27 # Planck constant: erg s
H_PLANK = 6.626196e-27  # Planck constant: erg s
def comoving_dist(z, om_m=0.3111, om_L=0.6889, h=0.6766):
def comoving_dist(z, om_m=0.3111, om_L=0.6889, h=0.6766):
# Return comoving distance in pc
H0 = h*100. # km / (s Mpc)
H0 = h*100. # km / (s Mpc)
def dist_int(z):
return 1./np.sqrt(om_m*(1.+z)**3 + om_L)
res, err = integrate.quad(dist_int, 0., z)
return [res * (VC_M/1e3/H0) * 1e6, err * (VC_M/1e3/H0) * 1e6]
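A quick usage check of the helper above (the exact value depends on the adopted cosmological parameters):

d_pc, err_pc = comoving_dist(1.0)   # comoving distance to z = 1 with the default parameters
print(d_pc / 1.0e6)                 # roughly 3.4e3 Mpc for these Planck-like defaults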
def magToFlux(mag):
"""
flux of a given AB magnitude
......@@ -30,6 +33,7 @@ def magToFlux(mag):
flux = 10**(-0.4*(mag+48.6))
return flux
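Sanity check for the zero point: AB magnitude 0 corresponds to f_nu = 3631 Jy:

f_nu = magToFlux(0.0)
print(f_nu, f_nu * 1.0e23)   # ~3.63e-20 erg s^-1 cm^-2 Hz^-1, i.e. ~3631 Jy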
def extAv(nav, seed=1212123):
"""
Generate random intrinsic extinction Av
......@@ -39,7 +43,7 @@ def extAv(nav, seed=1212123):
tau = 0.4
peak, a = 0.1, 0.5
b = a*(tau-peak)
pav = lambda av: (a*av+b)*np.exp(-av/tau)
def pav(av): return (a*av+b)*np.exp(-av/tau)
avmin, avmax = 0., 3.
avs = np.linspace(avmin, avmax, int((avmax-avmin)/0.001)+1)
norm = np.trapz(pav(avs), avs)
......@@ -66,18 +70,20 @@ def seds(sedlistn, seddir="./", unit="A"):
reds = {}
sedlist = seddir + sedlistn
sedn = open(sedlist).read().splitlines()
sedtype = range(1,len(sedn)+1)
sedtype = range(1, len(sedn)+1)
for i in range(len(sedn)):
xxx = sedn[i].split()
isedn = seddir+xxx[0]
itype = sedtype[i]
ised = np.loadtxt(isedn)
if unit=="nm": ised[:,0] *= 10.0
if unit == "nm":
ised[:, 0] *= 10.0
seds[itype] = ised
reds[itype] = int(xxx[1])
return seds, reds
def sed_assign(phz, btt, rng):
"""
assign SED template to a galaxy.
......@@ -106,6 +112,8 @@ def sed_assign(phz, btt, rng):
return sedtype
###########################################
def tflux(filt, sed, redshift=0.0, av=0.0, redden=0):
"""
calculate the theoretical SED for a given filter set and template
......@@ -130,44 +138,47 @@ def tflux(filt, sed, redshift=0.0, av=0.0, redden=0):
SED in observed frame
"""
z = redshift + 1.0
sw, sf = sed[:,0], sed[:,1]
sw, sf = sed[:, 0], sed[:, 1]
# reddening
sf = reddening(sw, sf, av=av, model=redden)
sw, sf = sw*z, sf*(z**3)
# lyman forest correction
sf = lyman_forest(sw, sf, redshift)
sedxx = (sw.copy(), sf.copy())
sw = VC_A/sw
sf = sf*(VC_A/sw**2) # convert flux unit to erg/s/cm^s/Hz
sf = sf*(VC_A/sw**2) # convert flux unit to erg/s/cm^s/Hz
sw, sf = sw[::-1], sf[::-1]
sfun = interp1d(sw, sf, kind='linear')
fwave, fresp = filt[:,0], filt[:,1]
fwave, fresp = filt[:, 0], filt[:, 1]
fwave = VC_A/fwave
fwave, fresp = fwave[::-1], fresp[::-1]
tflux = sfun(fwave)
zpflux = 3.631*1.0e-20
tflux = np.trapz(tflux*fresp/fwave,fwave)/np.trapz(zpflux*fresp/fwave,fwave)
#tflux = np.trapz(tflux*fresp,fwave)/np.trapz(zpflux*fresp,fwave)
tflux = np.trapz(tflux*fresp/fwave, fwave) / \
np.trapz(zpflux*fresp/fwave, fwave)
# tflux = np.trapz(tflux*fresp,fwave)/np.trapz(zpflux*fresp,fwave)
return tflux, sedxx
###########################################
def lyman_forest(wavelen, flux, z):
"""
Compute the Lyman forest mean absorption of an input spectrum,
according to D_A and D_B evolution from Madau (1995).
The wavelength and flux are in the observed frame
"""
if z<=0:
if z <= 0:
flux0 = flux
else:
nw = 200
istep = np.linspace(0,nw-1,nw)
istep = np.linspace(0, nw-1, nw)
w1a, w2a = 1050.0*(1.0+z), 1170.0*(1.0+z)
w1b, w2b = 920.0*(1.0+z), 1015.0*(1.0+z)
wstepa = (w2a-w1a)/float(nw)
......@@ -177,20 +188,25 @@ def lyman_forest(wavelen, flux, z):
ptaua = np.exp(-3.6e-03*(wtempa/1216.0)**3.46)
wtempb = w1b + istep*wstepb
ptaub = np.exp(-1.7e-3*(wtempb/1026.0)**3.46\
-1.2e-3*(wtempb/972.50)**3.46\
-9.3e-4*(wtempb/950.00)**3.46)
ptaub = np.exp(-1.7e-3*(wtempb/1026.0)**3.46
- 1.2e-3*(wtempb/972.50)**3.46
- 9.3e-4*(wtempb/950.00)**3.46)
da = (1.0/(120.0*(1.0+z)))*np.trapz(ptaua, wtempa)
db = (1.0/(95.0*(1.0+z)))*np.trapz(ptaub, wtempb)
if da>1.0: da=1.0
if db>1.0: db=1.0
if da<0.0: da=0.0
if db<0.0: db=0.0
if da > 1.0:
da = 1.0
if db > 1.0:
db = 1.0
if da < 0.0:
da = 0.0
if db < 0.0:
db = 0.0
flux0 = flux.copy()
id0 = wavelen<=1026.0*(1.0+z)
id1 = np.logical_and(wavelen<1216.0*(1.0+z),wavelen>=1026.0*(1.0+z))
id0 = wavelen <= 1026.0*(1.0+z)
id1 = np.logical_and(wavelen < 1216.0*(1.0+z),
wavelen >= 1026.0*(1.0+z))
flux0[id0] = db*flux[id0]
flux0[id1] = da*flux[id1]
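A short usage sketch: redward of (1+z)*1216 A the spectrum is untouched, the Lyman-alpha forest region (rest-frame 1026 to 1216 A) is scaled by da, and everything blueward of rest-frame 1026 A by db. This assumes the function returns flux0 (the return statement falls outside the hunks shown):

import numpy as np

z = 3.0
wavelen = np.linspace(3000.0, 6000.0, 4)   # observed-frame wavelengths in Angstrom
flux = np.ones_like(wavelen)
print(lyman_forest(wavelen, flux, z))      # first two samples suppressed, last two unchanged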
......@@ -220,128 +236,131 @@ def reddening(sw, sf, av=0.0, model=0):
Return:
reddening-corrected flux or observed flux
"""
if model==0 or av==0.0:
flux=sf
elif model==1: # Allen (1976) for the Milky Way
lambda0 = np.array([1000, 1110, 1250, 1430, 1670, \
2000, 2220, 2500, 2850, 3330, \
3650, 4000, 4400, 5000, 5530, \
if model == 0 or av == 0.0:
flux = sf
elif model == 1: # Allen (1976) for the Milky Way
lambda0 = np.array([1000, 1110, 1250, 1430, 1670,
2000, 2220, 2500, 2850, 3330,
3650, 4000, 4400, 5000, 5530,
6700, 9000, 10000, 20000, 100000], dtype=float)
kR = np.array([4.20, 3.70, 3.30, 3.00, 2.70, \
2.80, 2.90, 2.30, 1.97, 1.69, \
1.58, 1.45, 1.32, 1.13, 1.00, \
0.74, 0.46, 0.38, 0.11, 0.00],dtype=float)
kR = np.array([4.20, 3.70, 3.30, 3.00, 2.70,
2.80, 2.90, 2.30, 1.97, 1.69,
1.58, 1.45, 1.32, 1.13, 1.00,
0.74, 0.46, 0.38, 0.11, 0.00], dtype=float)
ext0 = InterpolatedUnivariateSpline(lambda0, kR, k=1)
A_lambda = av*ext0(sw)
A_lambda[A_lambda<0.0] = 0.0
A_lambda[A_lambda < 0.0] = 0.0
flux = sf*10**(-0.4*A_lambda)
elif model==2: # Seaton (1979) fit by Fitzpatrick (1986) for the Milky Way
Rv=3.1
elif model == 2: # Seaton (1979) fit by Fitzpatrick (1986) for the Milky Way
Rv = 3.1
al0, ga, c1, c2, c3, c4 = 4.595, 1.051, -0.38, 0.74, 3.96, 0.26
ff11 = __red(1100.0,al0,ga,c1,c2,c3,c4)
ff12 = __red(1200.0,al0,ga,c1,c2,c3,c4)
slope=(ff12-ff11)/100.0
lambda0 = np.array([3650, 4000, 4400, 5000, 5530, \
ff11 = __red(1100.0, al0, ga, c1, c2, c3, c4)
ff12 = __red(1200.0, al0, ga, c1, c2, c3, c4)
slope = (ff12-ff11)/100.0
lambda0 = np.array([3650, 4000, 4400, 5000, 5530,
6700, 9000, 10000, 20000, 100000], dtype=float)
kR = np.array([1.58, 1.45, 1.32, 1.13, 1.00, \
0.74, 0.46, 0.38, 0.11, 0.00],dtype=float)
kR = np.array([1.58, 1.45, 1.32, 1.13, 1.00,
0.74, 0.46, 0.38, 0.11, 0.00], dtype=float)
fun = interp1d(lambda0, kR, kind='linear')
sw0 = sw[sw<1200.0]
sw0 = sw[sw < 1200.0]
A_lambda0 = (ff11+(sw0-1100.0)*slope)/Rv+1.0
sw1 = sw[np.logical_and(sw>=1200.0, sw<=3650.0)]
ff = __red(sw1,al0,ga,c1,c2,c3,c4)
sw1 = sw[np.logical_and(sw >= 1200.0, sw <= 3650.0)]
ff = __red(sw1, al0, ga, c1, c2, c3, c4)
A_lambda1 = ff/Rv+1.0
sw2 = sw[np.logical_and(sw>3650.0, sw<=100000.0)]
sw2 = sw[np.logical_and(sw > 3650.0, sw <= 100000.0)]
A_lambda2 = fun(sw2)
A_lambda3 = sw[sw>100000.0]*0.0
A_lambda = av*np.hstack([A_lambda0,A_lambda1,A_lambda2,A_lambda3])
A_lambda[A_lambda<0.0] = 0.0
A_lambda3 = sw[sw > 100000.0]*0.0
A_lambda = av*np.hstack([A_lambda0, A_lambda1, A_lambda2, A_lambda3])
A_lambda[A_lambda < 0.0] = 0.0
flux = sf*10**(-0.4*A_lambda)
elif model==3: # Fitzpatrick (1986) for the Large Magellanic Cloud (LMC)
Rv=3.1
elif model == 3: # Fitzpatrick (1986) for the Large Magellanic Cloud (LMC)
Rv = 3.1
al0, ga, c1, c2, c3, c4 = 4.608, 0.994, -0.69, 0.89, 2.55, 0.50
ff11 = __red(1100.0,al0,ga,c1,c2,c3,c4)
ff12 = __red(1200.0,al0,ga,c1,c2,c3,c4)
slope=(ff12-ff11)/100.0
lambda0 = np.array([3330, 3650, 4000, 4400, 5000, 5530, \
ff11 = __red(1100.0, al0, ga, c1, c2, c3, c4)
ff12 = __red(1200.0, al0, ga, c1, c2, c3, c4)
slope = (ff12-ff11)/100.0
lambda0 = np.array([3330, 3650, 4000, 4400, 5000, 5530,
6700, 9000, 10000, 20000, 100000], dtype=float)
kR = np.array([1.682, 1.58, 1.45, 1.32, 1.13, 1.00, \
0.74, 0.46, 0.38, 0.11, 0.00],dtype=float)
kR = np.array([1.682, 1.58, 1.45, 1.32, 1.13, 1.00,
0.74, 0.46, 0.38, 0.11, 0.00], dtype=float)
fun = interp1d(lambda0, kR, kind='linear')
sw0 = sw[sw<1200.0]
sw0 = sw[sw < 1200.0]
A_lambda0 = (ff11+(sw0-1100.0)*slope)/Rv+1.0
sw1 = sw[np.logical_and(sw>=1200.0, sw<=3330.0)]
ff = __red(sw1,al0,ga,c1,c2,c3,c4)
sw1 = sw[np.logical_and(sw >= 1200.0, sw <= 3330.0)]
ff = __red(sw1, al0, ga, c1, c2, c3, c4)
A_lambda1 = ff/Rv+1.0
sw2 = sw[np.logical_and(sw>3330.0, sw<=100000.0)]
sw2 = sw[np.logical_and(sw > 3330.0, sw <= 100000.0)]
A_lambda2 = fun(sw2)
A_lambda3 = sw[sw>100000.0]*0.0
A_lambda = av*np.hstack([A_lambda0,A_lambda1,A_lambda2,A_lambda3])
A_lambda[A_lambda<0.0] = 0.0
A_lambda3 = sw[sw > 100000.0]*0.0
A_lambda = av*np.hstack([A_lambda0, A_lambda1, A_lambda2, A_lambda3])
A_lambda[A_lambda < 0.0] = 0.0
flux = sf*10**(-0.4*A_lambda)
elif model==4: # Prevot et al (1984) and Bouchet (1985) for the Small Magellanic Cloud (SMC)
# Prevot et al (1984) and Bouchet (1985) for the Small Magellanic Cloud (SMC)
elif model == 4:
Rv = 2.72
lambda0 = np.array([1275, 1330, 1385, 1435, 1490, 1545, \
1595, 1647, 1700, 1755, 1810, 1860, \
1910, 2000, 2115, 2220, 2335, 2445, \
2550, 2665, 2778, 2890, 2995, 3105, \
lambda0 = np.array([1275, 1330, 1385, 1435, 1490, 1545,
1595, 1647, 1700, 1755, 1810, 1860,
1910, 2000, 2115, 2220, 2335, 2445,
2550, 2665, 2778, 2890, 2995, 3105,
3704, 4255, 5291, 12500, 16500, 22000], dtype=float)
kR = np.array([13.54, 12.52, 11.51, 10.80, 9.84, 9.28, \
9.06, 8.49, 8.01, 7.71, 7.17, 6.90, 6.76, \
6.38, 5.85, 5.30, 4.53, 4.24, 3.91, 3.49, \
3.15, 3.00, 2.65, 2.29, 1.81, 1.00, 0.00, \
-2.02, -2.36, -2.47],dtype=float)
kR = np.array([13.54, 12.52, 11.51, 10.80, 9.84, 9.28,
9.06, 8.49, 8.01, 7.71, 7.17, 6.90, 6.76,
6.38, 5.85, 5.30, 4.53, 4.24, 3.91, 3.49,
3.15, 3.00, 2.65, 2.29, 1.81, 1.00, 0.00,
-2.02, -2.36, -2.47], dtype=float)
kR = kR/Rv+1.0
ext0 = InterpolatedUnivariateSpline(lambda0, kR, k=1)
A_lambda = av*ext0(sw)
A_lambda[A_lambda<0.0] = 0.0
A_lambda[A_lambda < 0.0] = 0.0
flux = sf*10**(-0.4*A_lambda)
elif model==5: # Calzetti et al (2000) for starburst galaxies
elif model == 5: # Calzetti et al (2000) for starburst galaxies
Rv = 4.05
sw = sw*1.0e-04 #wavelength in microns
sw = sw*1.0e-04 # wavelength in microns
fun1 = lambda x: 2.659*(-2.156+1.509/x-0.198/x**2+0.011/x**3)+Rv
fun2 = lambda x: 2.659*(-1.857+1.040/x)+Rv
def fun1(x): return 2.659*(-2.156+1.509/x-0.198/x**2+0.011/x**3)+Rv
def fun2(x): return 2.659*(-1.857+1.040/x)+Rv
ff11, ff12 = fun1(0.11), fun1(0.12)
slope1=(ff12-ff11)/0.01
slope1 = (ff12-ff11)/0.01
ff99, ff100 = fun2(2.19), fun2(2.2)
slope2=(ff100-ff99)/0.01
slope2 = (ff100-ff99)/0.01
sw0 = sw[sw<0.12]
sw1 = sw[np.logical_and(sw>=0.12, sw<=0.63)]
sw2 = sw[np.logical_and(sw>0.63, sw<=2.2)]
sw3 = sw[sw>2.2]
sw0 = sw[sw < 0.12]
sw1 = sw[np.logical_and(sw >= 0.12, sw <= 0.63)]
sw2 = sw[np.logical_and(sw > 0.63, sw <= 2.2)]
sw3 = sw[sw > 2.2]
k_lambda0 = ff11+(sw0-0.11)*slope1
k_lambda1, k_lambda2 = fun1(sw1), fun2(sw2)
k_lambda3 = ff99+(sw3-2.19)*slope2
A_lambda = av*np.hstack([k_lambda0,k_lambda1,k_lambda2,k_lambda3])/Rv
A_lambda[A_lambda<0.0] = 0.0
A_lambda = av*np.hstack([k_lambda0, k_lambda1,
k_lambda2, k_lambda3])/Rv
A_lambda[A_lambda < 0.0] = 0.0
flux = sf*10**(-0.4*A_lambda)
elif model==6: # Reddy et al (2015) for star forming galaxies
elif model == 6:  # Reddy et al (2015) for star forming galaxies
Rv = 2.505
sw = sw*1.0e-04
fun1 = lambda x: -5.726+4.004/x-0.525/x**2+0.029/x**3+Rv
fun2 = lambda x: -2.672-0.010/x+1.532/x**2-0.412/x**3+Rv
def fun1(x): return -5.726+4.004/x-0.525/x**2+0.029/x**3+Rv
def fun2(x): return -2.672-0.010/x+1.532/x**2-0.412/x**3+Rv
ff11, ff12 = fun1(0.14), fun1(0.15)
slope1=(ff12-ff11)/0.01
slope1 = (ff12-ff11)/0.01
ff99, ff100 = fun2(2.84), fun2(2.85)
slope2=(ff100-ff99)/0.01
slope2 = (ff100-ff99)/0.01
sw0 = sw[sw<0.15]
sw1 = sw[np.logical_and(sw>=0.15, sw<0.60)]
sw2 = sw[np.logical_and(sw>=0.60, sw<2.85)]
sw3 = sw[sw>=2.85]
sw0 = sw[sw < 0.15]
sw1 = sw[np.logical_and(sw >= 0.15, sw < 0.60)]
sw2 = sw[np.logical_and(sw >= 0.60, sw < 2.85)]
sw3 = sw[sw >= 2.85]
k_lambda0 = ff11+(sw0-0.14)*slope1
k_lambda1, k_lambda2 = fun1(sw1), fun2(sw2)
k_lambda3 = ff99+(sw3-2.84)*slope2
A_lambda = av*np.hstack([k_lambda0,k_lambda1,k_lambda2,k_lambda3])/Rv
A_lambda[A_lambda<0.0] = 0.0
flux = sf*10**(-0.4*A_lambda)
A_lambda = av*np.hstack([k_lambda0, k_lambda1,
k_lambda2, k_lambda3])/Rv
A_lambda[A_lambda < 0.0] = 0.0
flux = sf*10**(-0.4*A_lambda)
else:
raise ValueError("!!! Please select a proper reddening model")
......@@ -349,25 +368,30 @@ def reddening(sw, sf, av=0.0, model=0):
return flux
###########################################
def __red(alan,al0,ga,c1,c2,c3,c4):
fun1 = lambda x: c3/(((x-(al0**2/x))**2)+ga*ga)
fun2 = lambda x,cc: cc*(0.539*((x-5.9)**2)+0.0564*((x-5.9)**3))
fun = lambda x,cc: c1+c2*x+fun1(x)+fun2(x,cc)
ala = alan*1.0e-04 #wavelength in microns
def __red(alan, al0, ga, c1, c2, c3, c4):
def fun1(x): return c3/(((x-(al0**2/x))**2)+ga*ga)
def fun2(x, cc): return cc*(0.539*((x-5.9)**2)+0.0564*((x-5.9)**3))
def fun(x, cc): return c1+c2*x+fun1(x)+fun2(x, cc)
ala = alan*1.0e-04 # wavelength in microns
p = 1.0/ala
if np.size(p)>1:
p1, p2 = p[p>=5.9], p[p<5.9]
ff = np.append(fun(p1,c4), fun(p2,0.0))
elif np.size(p)==1:
if p<5.9: c4 = 0.0
if np.size(p) > 1:
p1, p2 = p[p >= 5.9], p[p < 5.9]
ff = np.append(fun(p1, c4), fun(p2, 0.0))
elif np.size(p) == 1:
if p < 5.9:
c4 = 0.0
ff = fun(p, c4)
else:
return
return ff
###########################################
def sed2mag(mag_i, sedCat, filter_list, redshift=0.0, av=0.0, redden=0):
# load the filters
......@@ -378,19 +402,23 @@ def sed2mag(mag_i, sedCat, filter_list, redshift=0.0, av=0.0, redden=0):
if filter_list[k].filter_type == 'i':
nid = k
bandpass = filter_list[k].bandpass_full
ktrans = np.transpose(np.array([bandpass.wave_list*10.0, bandpass.func(bandpass.wave_list)]))
aflux[k], isedObs = tflux(ktrans, sedCat, redshift=redshift, av=av, redden=redden)
ktrans = np.transpose(
np.array([bandpass.wave_list*10.0, bandpass.func(bandpass.wave_list)]))
aflux[k], isedObs = tflux(
ktrans, sedCat, redshift=redshift, av=av, redden=redden)
# normalize to i-band
aflux = aflux / aflux[nid]
# magnitudes in all filters
amag = -2.5*np.log10(aflux) + mag_i
spec = galsim.LookupTable(x=np.array(isedObs[0]), f=np.array(isedObs[1]), interpolant='nearest')
spec = galsim.LookupTable(x=np.array(isedObs[0]), f=np.array(
isedObs[1]), interpolant='nearest')
isedObs = galsim.SED(spec, wave_type='A', flux_type='1', fast=False)
return amag, isedObs
def eObs(e1,e2,g1,g2):
def eObs(e1, e2, g1, g2):
"""
Calculate the sheared (observed) ellipticity using the
intrinsic ellipticity and cosmic shear components.
......@@ -424,7 +452,7 @@ def eObs(e1,e2,g1,g2):
e = complex(e1[i], e2[i])
g = complex(g1[i], g2[i])
e, gg = abs(e), abs(g)
if gg<=1.0:
if gg <= 1.0:
tt = e + g
bb = 1.0 + e*g.conjugate()
eobs = tt/bb
......@@ -432,27 +460,34 @@ def eObs(e1,e2,g1,g2):
tt = 1.0 + g*e.conjugate()
bb = e.conjugate() + g.conjugate()
eobs = tt/bb
# derive the orientation
dd = 0.5*np.arctan(abs(eobs.imag/eobs.real))*180.0/np.pi
if eobs.imag>0 and eobs.real>0: dd = dd
if eobs.imag>0 and eobs.real<0: dd = 90.0 - dd
if eobs.imag<0 and eobs.real>0: dd = 0.0 - dd
if eobs.imag<0 and eobs.real<0: dd = dd - 90.0
if eobs.imag > 0 and eobs.real > 0:
dd = dd
if eobs.imag > 0 and eobs.real < 0:
dd = 90.0 - dd
if eobs.imag < 0 and eobs.real > 0:
dd = 0.0 - dd
if eobs.imag < 0 and eobs.real < 0:
dd = dd - 90.0
e1obs += [eobs.real]
e2obs += [eobs.imag]
eeobs += [abs(eobs)]
theta += [dd]
e1obs,e2obs,eeobs,theta = np.array(e1obs),np.array(e2obs),np.array(eeobs),np.array(theta)
if nobj == 1: e1obs,e2obs,eeobs,theta = e1obs[0],e2obs[0],eeobs[0],theta[0]
e1obs, e2obs, eeobs, theta = np.array(e1obs), np.array(
e2obs), np.array(eeobs), np.array(theta)
if nobj == 1:
e1obs, e2obs, eeobs, theta = e1obs[0], e2obs[0], eeobs[0], theta[0]
return e1obs, e2obs, eeobs, theta
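The core of the loop is complex shear addition, e_obs = (e + g)/(1 + e g*) for |g| <= 1 (with the inverted form otherwise); a one-object sketch of that formula:

e = complex(0.2, 0.0)      # intrinsic ellipticity e1 + i*e2
g = complex(0.01, 0.02)    # shear g1 + i*g2
eobs = (e + g) / (1.0 + e * g.conjugate())
print(eobs.real, eobs.imag)    # ~0.209, ~0.021: weak shear perturbs the ellipticity only slightly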
def getObservedSED(sedCat, redshift=0.0, av=0.0, redden=0):
z = redshift + 1.0
sw, sf = sedCat[:,0], sedCat[:,1]
sw, sf = sedCat[:, 0], sedCat[:, 1]
# reddening
sf = reddening(sw, sf, av=av, model=redden)
# sw, sf = sw*z, sf*(z**3)
......@@ -464,28 +499,33 @@ def getObservedSED(sedCat, redshift=0.0, av=0.0, redden=0):
isedObs = (sw.copy(), sf.copy())
return isedObs
def integrate_sed_bandpass(sed, bandpass):
wave = np.linspace(bandpass.blue_limit, bandpass.red_limit, 1000) # in nm
wave = np.linspace(bandpass.blue_limit, bandpass.red_limit, 1000) # in nm
flux_normalized = sed(wave)*bandpass(wave)
# print('in integrate_sed_bandpass', bandpass.blue_limit, bandpass.red_limit)
int_flux = np.trapz(y=flux_normalized, x=wave) * 10. # convert to photons s-1 m-2 A-1
int_flux = np.trapz(y=flux_normalized, x=wave) * \
10. # convert to photons s-1 m-2 A-1
return int_flux
def getABMAG(interFlux, bandpass):
throughtput = Table(np.array(np.array([bandpass.wave_list*10.0, bandpass.func(bandpass.wave_list)])).T, names=(['WAVELENGTH', 'SENSITIVITY']))
throughtput = Table(np.array(np.array([bandpass.wave_list*10.0, bandpass.func(
bandpass.wave_list)])).T, names=(['WAVELENGTH', 'SENSITIVITY']))
sWave = bandpass.blue_limit*10.0
eWave = bandpass.red_limit*10.0
# print('in getABMAG', sWave, eWave)
ABMAG_zero = getABMagAverageVal(
ABmag=0,
norm_thr=throughtput,
sWave=sWave,
ABmag=0,
norm_thr=throughtput,
sWave=sWave,
eWave=eWave)
flux_ave = interFlux / (eWave-sWave)
ABMAG_spec = -2.5 * np.log10(flux_ave/ABMAG_zero)
return ABMAG_spec
def getABMagAverageVal(ABmag=20.,norm_thr=None, sWave=6840, eWave=8250):
def getABMagAverageVal(ABmag=20., norm_thr=None, sWave=6840, eWave=8250):
"""
norm_thr: astropy.table with 2 columns, 'WAVELENGTH', 'SENSITIVITY'
......@@ -496,15 +536,16 @@ def getABMagAverageVal(ABmag=20.,norm_thr=None, sWave=6840, eWave=8250):
inverseLambda = norm_thr['SENSITIVITY']/norm_thr['WAVELENGTH']
norm_thr_i = interpolate.interp1d(norm_thr['WAVELENGTH'], inverseLambda)
x = np.linspace(sWave,eWave, int(eWave)-int(sWave)+1)
x = np.linspace(sWave, eWave, int(eWave)-int(sWave)+1)
y = norm_thr_i(x)
AverageLamdaInverse = np.trapz(y,x)/(eWave-sWave)
AverageLamdaInverse = np.trapz(y, x)/(eWave-sWave)
norm = 54798696332.52474 * pow(10.0, -0.4 * ABmag) * AverageLamdaInverse
# print('AverageLamdaInverse = ', AverageLamdaInverse)
# print('norm = ', norm)
return norm
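The hard-coded 54798696332.52474 is consistent with the AB zero point expressed as a photon rate, f_nu(AB=0)/h, converted from per cm^2 to per m^2, so that norm carries photons s^-1 m^-2 A^-1 as in integrate_sed_bandpass above; this reading is an inference from the units, not documented in the source:

fnu_ab0 = 3.631e-20        # erg s^-1 cm^-2 Hz^-1 at AB = 0
h_planck = 6.626196e-27    # erg s, the H_PLANK value defined above
print(fnu_ab0 / h_planck * 1.0e4)    # ~5.4798e10, matching the hard-coded value to ~0.002%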
def getNormFactorForSpecWithABMAG(ABMag=20., spectrum=None, norm_thr=None, sWave=6840, eWave=8250):
"""
Normalize the spectrum to the input AB magnitude (zero point f_nu = 3631 Jy) within the normalization band (norm_thr)
......@@ -520,17 +561,19 @@ def getNormFactorForSpecWithABMAG(ABMag=20., spectrum=None, norm_thr=None, sWave
the normalization factor: the AB-system flux (for the given band and magnitude) / the flux of the input spectrum (in the same band)
"""
spectrumi = interpolate.interp1d(spectrum['WAVELENGTH'], spectrum['FLUX'])
norm_thri = interpolate.interp1d(norm_thr['WAVELENGTH'], norm_thr['SENSITIVITY'])
norm_thri = interpolate.interp1d(
norm_thr['WAVELENGTH'], norm_thr['SENSITIVITY'])
x = np.linspace(sWave,eWave, int(eWave)-int(sWave)+1)
x = np.linspace(sWave, eWave, int(eWave)-int(sWave)+1)
y_spec = spectrumi(x)
y_thr = norm_thri(x)
y = y_spec*y_thr
specAve = np.trapz(y,x)/(eWave-sWave)
norm = getABMagAverageVal(ABmag=ABMag, norm_thr=norm_thr, sWave=sWave, eWave=eWave)
specAve = np.trapz(y, x)/(eWave-sWave)
norm = getABMagAverageVal(
ABmag=ABMag, norm_thr=norm_thr, sWave=sWave, eWave=eWave)
if specAve == 0:
return 0
......@@ -551,12 +594,14 @@ def tag_sed(h5file, model_tag, teff=5000, logg=2, feh=0):
close_feh = 99
else:
close_feh = feh_grid[np.argmin(np.abs(feh_grid - feh))]
path = model_tag_str + f"_teff_{close_teff:.1f}_logg_{close_logg:.2f}_feh_{close_feh:.1f}"
path = model_tag_str + \
f"_teff_{close_teff:.1f}_logg_{close_logg:.2f}_feh_{close_feh:.1f}"
wave = np.array(h5file["wave"][model_tag_str][()]).ravel()
flux = np.array(h5file["sed"][path][()]).ravel()
return path, wave, flux
def convolveGaussXorders(img=None, sigma = 1):
def convolveGaussXorders(img=None, sigma=1):
from astropy.modeling.models import Gaussian2D
from scipy import signal
offset = int(np.ceil(sigma*10))
......@@ -571,14 +616,13 @@ def convolveGaussXorders(img=None, sigma = 1):
convImg = signal.fftconvolve(img, psf, mode='full', axes=None)
return convImg, offset
def convolveImg(img=None, psf = None):
def convolveImg(img=None, psf=None):
from astropy.modeling.models import Gaussian2D
from scipy import signal
convImg = signal.fftconvolve(img, psf, mode='full', axes=None)
offset_x = int(psf.shape[1]/2. + 0.5) - 1
offset_y = int(psf.shape[0]/2. + 0.5) - 1
offset = [offset_x,offset_y]
offset = [offset_x, offset_y]
return convImg, offset