Commit 5b7bab9d authored by Zhang Xin

init

parent 8f5f5a9b
# -*- coding: utf-8 -*-
# This code assembles the original BT-Settl SEDs into FITS files.
from __future__ import print_function
import os, random, glob
#import fortranformat as ff
import time
import scipy
from scipy.ndimage import gaussian_filter1d
#from scipy import integrate
import sys
#import subprocess
import numpy as np
import astropy
from astropy.io import fits #ascii, fits
from astropy.table import Table, Column
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from scipy.signal import savgol_filter
from spectres import spectres
import extinction
#wave in Angstrom
#flam in log
#The original BT-Settl SEDs are computed in parallel (4 CPUs), so the wavelength grid needs to be sorted.
#Different SEDs may have different wavelength grids; actually, some of them may just be missing a few wavelength points.
#A wavelength grid can be pre-defined (e.g. the Kurucz one) and all spectra interpolated onto it (see the resampling sketch below).
Cspeed=2.99792458e18 #speed of light in A/s
SB=5.66961e-5 #Stefan-Boltzmann constant in cgs
Nmax=999 #maximum number of data columns per FITS binary table HDU
Check=True
DF= -8.0 #offset added to the tabulated log10(flam) (BT-Settl .7 file convention)
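# Minimal sketch (not used by assemb_BT below) of the two steps described in the header
# comments: build an approximately log-uniform wavelength grid with constant resolution R,
# then resample a spectrum onto it with flux-conserving spectres. The default bounds and R
# are only illustrative values.
def _example_regrid(wv, flam, wv_start=1500., wv_end=12000., wv_R=1000.):
    """Resample (wv [A], flam) onto a grid with constant d(lambda)/lambda = 1/R."""
    wv0 = np.array([wv_start])
    while wv0[-1] < wv_end:                  # each step grows lambda by lambda/R
        wv0 = np.append(wv0, wv0[-1] + wv0[-1]/wv_R)
    flam0 = spectres(wv0, wv, flam)          # spectres(new_wavs, spec_wavs, spec_fluxes)
    return wv0, np.nan_to_num(flam0, nan=0., posinf=0., neginf=0.)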
def assemb_BT(wv_grid=None, lib_root_dir='/dev/null'):
    """Assemble BT-Settl spectra into one FITS file per metallicity.

    wv_grid: either the path of a wavelength-grid text file, or a string
    'start end R Regular' describing a log-uniform grid; None keeps the grid
    of the first spectrum."""
    print('By comparing Phoenix with Kurucz, the original Phoenix spectra should be normalized!')
    NORM=True
    #available [M/H]_alpha combinations of the BT-Settl (AGSS2009) grid
    BT_pm=['+0.5_a+0.0','+0.3_a+0.0','-0.0_a+0.0','-0.5_a+0.2','-1.0_a+0.4','-1.5_a+0.4','-2.0_a+0.4','-2.5_a+0.4','-3.0_a+0.4','-3.5_a+0.4','-4.0_a+0.4'] #['-0.0_a+0.0']
    checkf=open('check.txt','w')
    checkf.write("#BT Teffi loggi ratio\n")
    #lib_root_dir='/data4/SED/work/stellar_spectra_lib/BT-Settl/AGSS2009'
    #lib_root_dir='/home/chen/datashare/BT-Settl/AGSS2009'
    #if (not os.path.isdir(lib_root_dir)): lib_root_dir=lib_root_dir.replace('/media/chen/','/run/media/cycy/')
    dir_str='BT-Settl_MZZ_hot'
    ext_str='lte*ZZ.BT-Settl.7.gz' #e.g. lte037-4.5-0.0a+0.0.BT-Settl.7.gz
    wv0=[] #all spectra are to be interpolated onto the same grid
    if ((wv_grid is not None) and ('Regular' not in wv_grid)):
        #wavelength grid read from a text file (one header line, then wavelengths in A)
        wv0=open(wv_grid,'r').readlines()[1:]
        wv0=' '.join(wv0).replace('\n','').split()
        wv0=np.array([float(i) for i in wv0])
        print('min&max of wv0:', min(wv0), max(wv0))
        logwv0=np.log10(wv0)
    if ((wv_grid is not None) and ('Regular' in wv_grid)):
        #log-uniform grid described as 'start end R Regular'
        wv_start,wv_end,wv_R,tmp = wv_grid.split(' ')
        #logwv_start=np.log10(float(wv_start)); logwv_end=np.log10(float(wv_end)); wv_R=float(wv_R)
        #logwv0=np.arange(logwv_start, logwv_end, 1./wv_R)
        #if (logwv0[-1] < logwv_end): logwv0=np.append(logwv0, logwv0[-1]+1./wv_R)
        #wv0=10.**logwv0
        wv_start=float(wv_start); wv_end=float(wv_end); wv_R=float(wv_R)
        wv0=np.array([wv_start])
        while (wv0[-1] < wv_end): wv0=np.append(wv0, wv0[-1]+wv0[-1]/wv_R)
        logwv0=np.log10(wv0)
        #extinction vector (O'Donnell 1994, A_V=1, R_V=3.1) on the same grid
        Al0 = extinction.odonnell94(wv0, 1.0, 3.1)
        hduo=fits.BinTableHDU.from_columns(
            [fits.Column(name='wave', format='E', array=wv0),
             fits.Column(name='Al0', format='E', array=Al0)])
        hduo.writeto('Ext_odonnell94_R3.1_CSST_R'+str(int(wv_R))+'.fits')
    for BT in BT_pm:
        ori_Tls=[]; ori_Gls=[] #tls=[]; gls=[];
        ori_cols=[]; fname=[]
        final_cols=[]; final_Tls=[]; final_Gls=[]; final_fname=[]
        outf='BT-Settl_M'+BT+'.fits'
        glob_str=os.path.join(lib_root_dir, dir_str.replace('ZZ', BT), ext_str.replace('ZZ', BT).replace('_',''))
        print(glob_str)
        spefs=glob.glob(glob_str)
        print('nspe:', len(spefs))
        nn=0
        #sort the names according to Teff and logg; e.g. 'lte037-4.5-0.0a+0.0...' -> Teff=3700 K (chars 3:6 in units of 100 K), logg=4.5 (chars 6:10, sign flipped)
        spefs.sort( key = lambda spef: (float(os.path.split(spef)[-1][3:6]+'00'), -float(os.path.split(spef)[-1][6:10])) )
        for spef in spefs:
            tmp=os.path.split(spef)[-1]
            t=tmp[3:6]+'00'
            #if (int(t) > 22000): continue
            g=tmp[6:10]
            ori_Tls.append(int(t))
            Teffi=float(t)
            ori_Gls.append(-float(g)) #take care, '-' here
            loggi=-float(g)
            #fname.append(os.path.join(dir_str.replace('ZZ', BT), tmp))
            fname.append(os.path.basename(spef))
            nn=nn+1
            #if (nn > 5): break
            #print spef
            spe=np.genfromtxt(spef,dtype=None,names=['w','flam'],invalid_raise=False,usecols=(0,1), encoding=None) #dtype must be set to None, otherwise nan output
            #the .7 files use Fortran 'D' exponents
            wv=np.array([float(xx.replace('D','E')) for xx in spe['w']])
            flam=np.array([float(xx.replace('D','E'))+DF for xx in spe['flam']])
            #print wv[0], flam[0]
            print('min&max Wv:', min(wv), max(wv))
            inds = wv.argsort()
            wv = wv[inds]
            flam=flam[inds]
            if (nn==1):
                if (wv_grid is None and len(wv0) == 0):
                    wv0=wv
                    logwv0=np.log10(wv0)
                    #cols.append(fits.Column(name='wave', format='E', array=wv0, unit='A'))
                #if (not (wv0 == wv).all()): #actually they have different numbers of wave points
                #    print 'different wv.'
                #    exit(1)
            flam=10.**flam
            flam[np.isneginf(flam)] = 0.
            flam[np.isposinf(flam)] = 0.
            flam[np.isnan(flam)] = 0.
            if NORM:
                #rescale so that the integrated flux equals SB*Teff^4
                flam=flam*SB*Teffi**4./np.trapz(flam,wv)
            if Check:
                ratio=np.trapz(flam,wv)/SB/Teffi**4. #Flux=pi*I_average
                #if (abs(ratio-1)>0.01):
                checkf.write('{0:12s} {1:f} {2:f} {3:f}\n'.format(BT, Teffi, loggi, ratio))
            #interpolate onto the common grid if the wavelength grid is not identical
            if wv_grid is not None:
                #flux-conserving resampling: spectres(new_wavs, spec_wavs, spec_fluxes)
                #wv_extend=wv0[wv0>max(wv)]
                #if (len(wv_extend)>0):
                #    wv=np.insert(wv, len(wv), wv_extend)
                #    flam=np.insert(flam, len(flam), wv_extend*0)
                flam=spectres(wv0, wv, flam)
            elif not np.array_equal(wv0, wv):
                print("different wave grids")
                flam=spectres(wv0, wv, flam)
            flam[np.isneginf(flam)] = 0.
            flam[np.isposinf(flam)] = 0.
            flam[np.isnan(flam)] = 0.
            #flam[flam<0.] = 0.
            #ori_cols.append(fits.Column(name='f'+str(nn), format='E', array=flam, unit='erg/s/A'))
            ori_cols.append(flam)
            #if Check:
            #    ratio=np.trapz(flam,wv0)/SB/Teffi**4. #Flux=pi*I_average
            #    if (abs(ratio-1)>0.01):
            #        print 'spec, Teff, logg:', BT, Teffi, loggi
            #        print 'fk/SB/T^4:', ratio
            #ori_cols.append(np.zeros(len(wv0)))
        #make a regular (Teff, logg) grid
        uniq_Tls=np.unique(ori_Tls)
        uniq_Gls=np.unique(ori_Gls)
        ori_Tls=np.array(ori_Tls)
        ori_Gls=np.array(ori_Gls)
        nnn=0
        final_cols.append(fits.Column(name='wave', format='E', array=wv0, unit='A'))
        for Ti in uniq_Tls:
            ids_T=list(filter(lambda x: ori_Tls[x] == Ti, range(len(ori_Tls))))
            for Gi in uniq_Gls:
                nnn=nnn+1
                #models at this Teff whose logg is closest to Gi
                ids=np.where(abs( abs(ori_Gls[ids_T]-Gi) - np.min(abs(ori_Gls[ids_T]-Gi))) < 1E-5)[0]
                ids_TG=np.array(ids_T)[ids]
                if (len(ids_TG) == 1):
                    final_cols.append(fits.Column(name='f'+str(nnn), format='E', array=ori_cols[ids_TG[0]], unit='erg/s/A'))
                    final_fname.append(fname[ids_TG[0]])
                elif (len(ids_TG) == 2):
                    #two equally close logg values: average the two spectra
                    final_cols.append(fits.Column(name='f'+str(nnn), format='E',
                                      array=(ori_cols[ids_TG[0]]+ori_cols[ids_TG[1]])/2., unit='erg/s/A'))
                    final_fname.append(fname[ids_TG[0]]+'_'+fname[ids_TG[1]])
                else:
                    print("error! unexpected number of models with Teff=", Ti, "logg~", Gi)
                    sys.exit()
                final_Tls.append(Ti)
                final_Gls.append(Gi)
                #final_cols.append()
                #final_fname.append()
        col1=fits.Column(name='Teff', format='E', array=final_Tls, unit='K')
        col2=fits.Column(name='logg', format='E', array=final_Gls)
        col3=fits.Column(name='specf', format='80A', array=list(final_fname))
        cols1 = fits.ColDefs([col1, col2, col3])
        #tab1=fits.BinTableHDU.from_columns(cols1)
        #hdu = fits.PrimaryHDU()
        #thdulist = fits.HDUList([hdu, tab1])
        #thdulist.writeto(outf)
        tab1=fits.BinTableHDU.from_columns(cols1)
        #tab2=fits.BinTableHDU.from_columns(cols)
        #output layout: HDU1 = (Teff, logg, specf) index table; HDU2+ = 'wave' plus the
        #f1..fN flux columns, split into chunks of Nmax columns (FITS limit of 999 fields per table)
        tabs=[fits.BinTableHDU.from_columns(final_cols[i*Nmax:(i+1)*Nmax]) for i in range((len(final_cols)+Nmax-1)//Nmax) ]
        hdu = fits.PrimaryHDU()
        #thdulist = fits.HDUList([hdu, tab1, tab2])
        thdulist = fits.HDUList([hdu, tab1]+tabs)
        thdulist.writeto(outf)
    checkf.close()
#wv_grid="/media/chen/SED/N/stellar_spectra_lib/KURUCZ_related/castelli/Kurucz.wv"
#if (not os.path.isfile(wv_grid)): wv_grid=wv_grid.replace('/media/chen/','/run/media/cycy/')
#assemb_BT(wv_grid=wv_grid)
assemb_BT(wv_grid='1500. 12000. 1000. Regular', lib_root_dir='/home/chen/datashare/BT-Settl/AGSS2009')
#python /media/chen/SED/work/lib/spectra_assemble/assemb_BT.py
#python /run/media/cycy/SED/work/lib/spectra_assemble/assemb_BT.py
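# Hedged usage sketch (not called here): how a file written above, e.g.
# 'BT-Settl_M-0.0_a+0.0.fits', could be read back. Row n of the index table in HDU 1
# corresponds to flux column 'f'+str(n+1) in the chunked HDUs 2+.
def _read_assembled(path, n):
    """Return (Teff, logg, wave, flam) for row n of the index table."""
    with fits.open(path) as hdul:
        teff = hdul[1].data['Teff'][n]
        logg = hdul[1].data['logg'][n]
        wave = hdul[2].data['wave']        # common wavelength grid [A]
        colname = 'f' + str(n + 1)
        for hdu in hdul[2:]:               # flux columns are spread over several HDUs
            if colname in hdu.columns.names:
                return teff, logg, wave, hdu.data[colname]
    raise KeyError(colname)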
'''
Author: xin zhangxinbjfu@gmail.com
Date: 2020-05-25 14:07:48
LastEditors: xin zhangxinbjfu@gmail.com
LastEditTime: 2022-06-07 12:45:22
FilePath: /src/Users/zhangxin/Work/SlitlessSim/sed/produceSED_bycatfile/data/throughputs/CSST/toFitsTable.py
Description: This is the default header; set `customMade` and open koroFileHeader to view the configuration options: https://github.com/OBKoro1/koro1FileHeader/wiki/%E9%85%8D%E7%BD%AE
'''
from astropy.table import Table
from astropy.io import fits
import numpy as np
fil = ['nuv','u','g','r','i','z','y']
for f in fil:
    d = np.loadtxt(f+'_throughput.txt')
    d[:,0] = d[:,0]   # wavelength kept as-is (no unit conversion applied)
    d[:,1] = d[:,1]   # sensitivity kept as-is
    t= Table(d, names=('WAVELENGTH', 'SENSITIVITY'))
    t.write(f+'.Throughput.fits', format='fits')
# d[:,0] = d[:,0]
# d[:,1] = d[:,1]*0.1
# t= Table(d, names=('WAVELENGTH', 'SENSITIVITY'))
# t.write('GI.Throughput.0st.fits', format='fits')
# d = loadtxt('GU_1st.dat')
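# Hedged sanity check (a minimal sketch, assuming the loop above just ran in this
# directory): read the last table back to confirm the column names that were written.
check = Table.read(fil[-1] + '.Throughput.fits')
print(fil[-1], check.colnames, len(check))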