"git@csst-tb.bao.ac.cn:csst-cicd/csst-dag.git" did not exist on "9c2710ee3169bdf7357e66bb6b4692bfd7570241"
Commit 5e418b7c authored by BO ZHANG's avatar BO ZHANG 🏀
Browse files

rewrite csst-dag as a dependency package of csst-pipeline-cli

parent 1400266d
{
"dag_group": "csst-msc-l2-mbi-mosaic",
"dag_group_run": "195244ff176f923aec9a9328c75ecaeb4a8c4345",
"dag": "csst-msc-l2-mbi-mosaic",
"dag_run": "c89d7e7a022e6f0cdf1daff921c29dbce0ac7c01",
"batch_id": "inttest",
"priority": 1,
"created_time": "1970-01-01T00:00:00",
"rerun": 0,
"dataset": "csst-msc-c9-25sqdeg-v3",
"instrument": "MSC",
"obs_type": "WIDE",
"obs_group": "W2",
"obs_id": "",
"detector": "",
"filter": "r",
"custom_id": "123456",
"data_list": [],
"extra_kwargs": {},
"pmapname": "",
"ref_cat": "",
"n_file_expected": -1,
"n_file_found": -1
}
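# For orientation, a minimal sketch of how a DAG run message like the JSON
# above might be loaded and sanity-checked. The required-key list comes from
# the message itself; the helper function and its validation logic are
# illustrative assumptions, not part of this commit.
import json

REQUIRED_KEYS = ("dag_group", "dag_group_run", "dag", "dag_run", "batch_id")

def load_dag_run_message(path: str) -> dict:
    """Load a DAG run message and verify that its basis keys are present."""
    with open(path) as f:
        msg = json.load(f)
    missing = [k for k in REQUIRED_KEYS if k not in msg]
    if missing:
        raise KeyError(f"DAG run message {path} is missing keys: {missing}")
    return msg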
name: csst-msc-l1-mbi-mosaic
tasks:
- name: MOSAIC
image: csst-msc-l1-mbi-mosaic
{
"dag_group": "csst-msc-l2-mbi-xcat",
"dag_group_run": "195244ff176f923aec9a9328c75ecaeb4a8c4345",
"dag": "csst-msc-l2-mbi-xcat",
"dag_run": "c89d7e7a022e6f0cdf1daff921c29dbce0ac7c01",
"batch_id": "inttest",
"priority": 1,
"created_time": "1970-01-01T00:00:00",
"rerun": 0,
"dataset": "csst-msc-c9-25sqdeg-v3",
"instrument": "MSC",
"obs_type": "WIDE",
"obs_group": "W2",
"obs_id": "",
"detector": "",
"filter": "r",
"custom_id": "123456",
"data_list": [],
"extra_kwargs": {},
"pmapname": "",
"ref_cat": "",
"n_file_expected": -1,
"n_file_found": -1
}
name: csst-msc-l1-mbi-xcat
tasks:
- name: XCAT
image: csst-msc-l1-mbi-xcat
{
"dataset": "",
"instrument": "",
"obs_type": "",
"obs_group": "",
"obs_id": "",
"detector": "",
"filter": "",
"custom_id": "",
"batch_id": "",
"pmapname": "",
"ref_cat": "",
"dag_group": "",
"dag": "",
"dag_group_run": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"dag_run": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"priority": 1,
"data_list": [],
"extra_kwargs": {},
"created_time": "1970-01-01T00:00:00",
"rerun": -1,
"status_code": -1024,
"n_file_expected": -1,
"n_file_found": -1,
"object": "",
"proposal_id": ""
}
import os
import socket
from typing import Optional
from urllib.parse import urlparse

import csst_fs
import numpy as np
from astropy import table
from csst_dfs_client import plan, level0, level1, catalog

from ._csst import csst
from .utils import override_common_keys
# -----------------
# INPUT PARAMETERS
# -----------------
DFS1_PLAN_PARAMS = {
"dataset": None,
"instrument": None,
"obs_type": None,
"obs_group": None,
"obs_id": None,
"proposal_id": None,
}
DFS1_LEVEL0_PARAMS = {
"dataset": None,
"instrument": None,
"obs_type": None,
"obs_group": None,
"obs_id": None,
"detector": None,
"filter": None,
"prc_status": None,
"qc_status": None,
# "data_model"
}
DFS1_LEVEL1_PARAMS = {
"dataset": None,
"instrument": None,
"obs_type": None,
"obs_group": None,
"obs_id": None,
"detector": None,
"prc_status": None,
"qc_status": None,
# special keys for data products
"data_model": None,
"batch_id": "default_batch",
# "build": None,
# "pmapname": None,
}
# PROC_PARAMS = {
# "priority": 1,
# "batch_id": "default_batch",
# "pmapname": "pmapname",
# "final_prc_status": -2,
# "demo": False,
# # should be capable to extend
# }
# -----------------
# OUTPUT PARAMETERS
# -----------------
# plan basis keys
DFS1_PLAN_BASIS_KEYS = (
"dataset",
"instrument",
"obs_type",
"obs_group",
"obs_id",
"n_file",
"_id",
)
# data basis keys
DFS1_LEVEL0_BASIS_KEYS = (
"dataset",
"instrument",
"obs_type",
"obs_group",
"obs_id",
"detector",
"file_name",
"_id",
"prc_status",
"qc_status",
)
DFS1_LEVEL1_BASIS_KEYS = (
"dataset",
"instrument",
"obs_type",
"obs_group",
"obs_id",
"detector",
"file_name",
"_id",
"prc_status",
"qc_status",
"data_model",
"batch_id",
"build",
"pmapname",
)
# DFS2 META
DFS2_META_BASIS_KEYS = (
"dataset",
"instrument",
"obs_type",
"obs_group",
"obs_id",
"detector",
"filter",
"pmapname",
"ref_cat",
"custom_id",
"batch_id",
"dag_group",
"dag_group_run",
"dag",
"dag_run",
"priority",
"data_list",
"extra_kwargs",
"create_time",
"rerun",
"data_model",
"data_uuid",
"qc_status",
"docker_image",
"build",
"object",
"proposal_id",
"ra",
"dec",
"obs_date",
"prc_date",
)
DFS2_META_PARAMS = {k: None for k in DFS2_META_BASIS_KEYS}
def assert_env_exists(env_var: str):
"""Assert that an environment variable exists.
Parameters
----------
env_var : str
Name of the environment variable to check.
Raises
------
AssertionError
If the environment variable is not set.
"""
assert env_var in os.environ, f"Environment variable {env_var} is not set."
def check_url_accessibility(url: str, timeout=3, raise_error: bool = True):
"""Check if a URL is accessible.
Parameters
----------
url : str
URL to check.
timeout : int, optional
Timeout in seconds, by default 3.
raise_error : bool, optional
Whether to raise an error if the URL is not accessible, by default True.
Raises
------
AssertionError
If the URL is not accessible and raise_error is True.
Returns
-------
bool
True if the URL is accessible, False otherwise.
"""
try:
if not url.startswith("http"):
url = f"http://{url}"
parsed_url = urlparse(url)
ip, port = parsed_url.hostname, parsed_url.port
print(ip, port)
if port is None:
port = 80
        # create a TCP socket
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(timeout)  # set the timeout in seconds
        # attempt the connection
        result = sock.connect_ex((ip, port))
        # report the status
        if result == 0:  # 0 means success
            return True
        else:
            if raise_error:
                raise AssertionError(f"URL {url} is not accessible.")
            else:
                return False
    finally:
        sock.close()  # make sure the socket is closed
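# A minimal usage sketch of check_url_accessibility (the host:port mirrors the
# CSST_DFS_GATEWAY example noted in DFS.__init__ below; purely illustrative):
if __name__ == "__main__":
    if check_url_accessibility("10.200.60.246:28000", timeout=3, raise_error=False):
        print("DFS gateway reachable")
    else:
        print("DFS gateway NOT reachable")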
# join_type for data x plan
PLAN_JOIN_TYPE = "inner"
"""
References:
- https://docs.astropy.org/en/stable/api/astropy.table.join.html
- https://docs.astropy.org/en/stable/table/operations.html#join
Typical types:
- inner join: Only matching rows from both tables
- left join: All rows from left table, matching rows from right table
- right join: All rows from right table, matching rows from left table
- outer join: All rows from both tables
- cartesian join: Every combination of rows from both tables
"""
# def extract_basis_table(dlist: list[dict], basis_keys: tuple) -> table.Table:
# """Extract basis key-value pairs from a list of dictionaries."""
# return table.Table([{k: d.get(k, "") for k in basis_keys} for d in dlist])
# def split_data_basis(data_basis: table.Table, n_split: int = 1) -> list[table.Table]:
# """Split data basis into n_split parts via obs_id"""
# assert (
# np.unique(data_basis["dataset"]).size == 1
# ), "Only one dataset is allowed for splitting."
# # sort
# data_basis.sort(keys=["dataset", "obs_id"])
# # get unique obsid
# u_obsid, i_obsid, c_obsid = np.unique(
# data_basis["obs_id"].data, return_index=True, return_counts=True
# )
# # set chunk size
# chunk_size = int(np.fix(len(u_obsid) / n_split))
# # initialize chunks
# chunks = []
# for i_split in range(n_split):
# if i_split < n_split - 1:
# chunks.append(
# data_basis[
# i_obsid[i_split * chunk_size] : i_obsid[(i_split + 1) * chunk_size]
# ]
# )
# else:
# chunks.append(data_basis[i_obsid[i_split * chunk_size] :])
# # np.unique(table.vstack(chunks)["_id"])
# # np.unique(table.vstack(chunks)["obs_id"])
# return chunks
class DFS:
# plan table
# dfs1_plan_find = plan.find
dfs1_level0_find = level0.find
dfs1_level1_find = level1.find
# dfs1_dag_find = dag.find
dfs1_catalog = catalog
# file search
dfs2_product_find = csst_fs.query_metadata
def __init__(self, raise_error: bool = True):
assert_env_exists("CSST_DFS_GATEWAY")
assert_env_exists("CSST_DFS_TOKEN")
assert_env_exists("CSST_BACKEND_API_URL")
# CSST_DFS_GATEWAY=10.200.60.246:28000
# CSST_BACKEND_API_URL=http://10.200.60.199:9010
# check DFS accessibility
# DFS1
check_url_accessibility(os.environ["CSST_DFS_GATEWAY"], raise_error=raise_error)
# DFS2
check_url_accessibility(
os.environ["CSST_BACKEND_API_URL"], raise_error=raise_error
)
@staticmethod
def dfs1_find_plan(**kwargs) -> table.Table:
"""Find plan data from DFS1."""
# query
prompt = "DFS1.plan"
qr_kwargs = override_common_keys(DFS1_PLAN_PARAMS, kwargs)
qr = plan.find(**qr_kwargs)
assert qr.success, qr
print(f">>> [{prompt}] query kwargs: {qr_kwargs}")
print(f">>> [{prompt}] {len(qr.data)} records found.")
# plan basis / obsid basis
try:
for _ in qr.data:
this_instrument = _["instrument"]
if this_instrument == "HSTDM":
if _["params"]["detector"] == "SIS12":
this_n_file = len(_["params"]["exposure_start"]) * 2
else:
this_n_file = len(_["params"]["exposure_start"])
else:
# count effective detectors of this instrument
this_n_file = len(csst[this_instrument].effective_detector_names)
_["n_file"] = this_n_file
        except KeyError as e:
            raise KeyError(f"Key {e} not found in plan record: {_}") from e
return table.Table(qr.data)
@staticmethod
def dfs1_find_plan_basis(**kwargs) -> table.Table:
"""Extract plan basis from plan data."""
plan_data = DFS.dfs1_find_plan(**kwargs)
plan_basis = plan_data[DFS1_PLAN_BASIS_KEYS]
return plan_basis
@staticmethod
def dfs1_find_level0(**kwargs) -> table.Table:
# query
prompt = "DFS1.level0"
qr_kwargs = override_common_keys(DFS1_LEVEL0_PARAMS, kwargs)
qr = level0.find(**qr_kwargs)
assert qr.success, qr
print(f">>> [{prompt}] query kwargs: {qr_kwargs}")
print(f">>> [{prompt}] {len(qr.data)} records found.")
return table.Table(qr.data)
@staticmethod
def dfs1_find_level0_basis(**kwargs) -> table.Table:
level0_data = DFS.dfs1_find_level0(**kwargs)
level0_basis = level0_data[DFS1_LEVEL0_BASIS_KEYS]
return level0_basis
@staticmethod
def dfs1_find_level1(**kwargs) -> table.Table:
# query
prompt = "DFS1.level1"
qr_kwargs = override_common_keys(DFS1_LEVEL1_PARAMS, kwargs)
qr = level1.find(**qr_kwargs)
assert qr.success, qr
print(f">>> [{prompt}] query kwargs: {qr_kwargs}")
print(f">>> [{prompt}] {len(qr.data)} records found.")
return table.Table(qr.data)
@staticmethod
def dfs1_find_level1_basis(**kwargs) -> table.Table:
level1_data = DFS.dfs1_find_level1(**kwargs)
level1_basis = level1_data[DFS1_LEVEL1_BASIS_KEYS]
return level1_basis
# TODO: DFS2 META query
@staticmethod
def dfs2_find_meta(**kwargs) -> table.Table:
"""Find meta data from DFS2."""
# query
prompt = "DFS2.meta"
qr_kwargs = override_common_keys(DFS2_META_PARAMS, kwargs)
qr = csst_fs.query_metadata(**qr_kwargs)
assert qr.success, qr
print(f">>> [{prompt}] query kwargs: {qr_kwargs}")
print(f">>> [{prompt}] {len(qr.data)} records found.")
return table.Table(qr.data)
import hashlib
import random
import string
from astropy.time import Time
def generate_sha1_from_time(verbose=False):
    """
    Generate a SHA-1 hash from the current time, salted with a random string
    to ensure uniqueness.

    Returns
    -------
    tuple or str
        ``(timestamp, random_str, sha1_hash)`` if ``verbose`` is True,
        otherwise only the SHA-1 hash.
    """
    # current timestamp in ISO format (millisecond precision)
    timestamp = Time.now().isot
    # generate 40 random letters and digits
    random_str = "".join(random.choices(string.ascii_letters + string.digits, k=40))
    # combine the timestamp and the random string
    combined_str = f"{timestamp}_{random_str}"
    # compute the SHA-1 hash
    sha1 = hashlib.sha1()
    sha1.update(combined_str.encode("utf-8"))
    sha_value = sha1.hexdigest()
    if verbose:
        return timestamp, random_str, sha_value
    else:
        return sha_value
if __name__ == "__main__":
    # generate and print the results
    for i in range(3):
        ...
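# A separate usage sketch of generate_sha1_from_time (the demo loop above is
# elided in this diff; this block only illustrates the two return modes):
if __name__ == "__main__":
    run_id = generate_sha1_from_time()
    print(run_id)  # 40-character hex digest, e.g. usable as a dag_run identifier
    timestamp, random_str, sha_value = generate_sha1_from_time(verbose=True)
    print(timestamp, sha_value)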
def override_common_keys(d1: dict, d2: dict) -> dict:
    """
    Construct a new dictionary by overriding the values of keys that exist in
    the first dictionary with the corresponding values from the second.

    Parameters
    ----------
    d1 : dict
        The base dictionary; its keys define the keys of the result.
    d2 : dict
        The overriding dictionary; only its keys that also exist in ``d1``
        take effect.

    Returns
    -------
    dict
        The updated dictionary.
    """
    return {k: d2[k] if k in d2 else d1[k] for k in d1}
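# A short example of the override behavior: keys absent from d1 are ignored,
# which is how the DFS query-parameter templates are filled from user kwargs.
# (Toy values; not part of this commit.)
if __name__ == "__main__":
    defaults = {"dataset": None, "obs_type": None, "batch_id": "default_batch"}
    overrides = {"dataset": "csst-msc-c9-25sqdeg-v3", "unknown_key": 1}
    print(override_common_keys(defaults, overrides))
    # {'dataset': 'csst-msc-c9-25sqdeg-v3', 'obs_type': None, 'batch_id': 'default_batch'}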
@@ -6,28 +6,28 @@ build-backend = "setuptools.build_meta"
name = "csst_dag"
version = "0.0.1"
authors = [
    { name = "Bo Zhang", email = "bozhang@nao.cas.cn" }
]
description = "CSST DAG"
readme = { file = "README.md", content-type = "text/markdown" }
requires-python = ">=3.11"
license = "MIT"  # SPDX license identifier string, replacing the deprecated table form
keywords = ["astronomy", "scientific", "physics"]
classifiers = [
    "Development Status :: 3 - Alpha",
    "Intended Audience :: Science/Research",
    "Operating System :: OS Independent",
    "Programming Language :: Python :: 3",
    "Topic :: Scientific/Engineering :: Physics",
    "Topic :: Scientific/Engineering :: Astronomy"
]
dependencies = [
    # Dependencies from requirements.txt must be converted manually into this format:
    "astropy",
    "numpy",
    "toml",
    # Note: direct Git URL dependencies may cause compatibility problems in some
    # build or install environments. If problems arise, consider moving this to
    # `[project.optional-dependencies]` or packaging the dependency in advance.
    "csst-dfs-client @ git+https://csst-tb.bao.ac.cn/code/csst-dfs/csst-dfs-client.git",
]

[project.urls]
@@ -35,11 +35,25 @@ homepage = "https://csst-tb.bao.ac.cn/code/csst-cicd/csst-dag"

[tool.setuptools]
# Automatically discover Python packages
packages = { find = { where = ["."] } }
include-package-data = true

[tool.setuptools.package-data]
csst_dag = [
    "dag_config/*",
    "_dfs/*",
]

# Optional: define extra dependency groups for tests, docs, etc.
# [project.optional-dependencies]
# test = [
#     "pytest>=6.0",
# ]
# dev = [
#     "black",
#     "flake8",
# ]
# If the Git dependency is moved here, it can be defined like this
# (install with `pip install 'csst_dag[dfs]'`):
# dfs = [
#     "csst-dfs-client @ git+https://csst-tb.bao.ac.cn/code/csst-dfs/csst-dfs-client.git",
# ]
"""
Aim
---
Process an MSC dataset, given a set of parameters.
Example
-------
python -m csst_dag.cli.msc_l1 -h
python -m csst_dag.cli.csst_msc_l1 \
--dag csst-msc-l1-mbi csst-msc-l1-sls \
--dataset=csst-msc-c9-25sqdeg-v3 \
--instrument=MSC \
--obs-type=WIDE \
--obs-group=W1 \
--obs-id=10100232366 \
--detector=09 \
--prc-status=-1024 \
--batch-id=test-b1 \
--priority=1 \
--pmapname=csst_000070.pmap \
--ref-cat=trilegal_093 \
--submit
"""
from csst_dag.dag import CSST_DAGS, Dispatcher, BaseDAG
from csst_dag import dfs
import argparse
parser = argparse.ArgumentParser(
description="Scheduler for CSST MSC L1 pipeline.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
# data parameters
parser.add_argument("--dataset", type=str, help="Dataset name")
parser.add_argument("--instrument", type=str, help="Instrument name", default=None)
parser.add_argument("--obs-type", type=str, help="Observation type", default=None)
parser.add_argument("--obs-group", type=str, help="Observation group", default=None)
parser.add_argument("--obs-id", type=str, help="Observation ID", default=None)
parser.add_argument("--detector", type=str, help="Detector name", default=None)
parser.add_argument(
    "--prc-status", type=int, help="Initial processing status", default=-1024
)
# task parameters
parser.add_argument("--batch-id", type=str, help="Batch ID", default="test-batch")
parser.add_argument("--priority", type=str, help="Task priority", default=1)
# DAG parameters
parser.add_argument("--pmapname", type=str, help="CCDS pmapname", default="")
parser.add_argument(
"--ref-cat", type=str, help="Reference catalog", default="trilegal_093"
)
# submit
parser.add_argument("--submit", action="store_true", help="Push results", default=False)
# post-processing parameters
parser.add_argument(
"--final-prc-status", type=int, help="Final processing status", default=-2
)
args = parser.parse_args()
# from csst_dag import DotDict
#
# args = DotDict(
# dataset="csst-msc-c9-25sqdeg-v3",
# instrument="MSC",
# obs_type="WIDE",
# obs_group="W2",
# obs_id="10100232366",
# detector=None,
# prc_status=None,
# batch_id="test-batch",
# priority=1,
# pmapname="csst_000070.pmap",
# ref_cat="trilegal_093",
# submit=False,
# )
print("CLI parameters: ", args)
plan_basis, data_basis = Dispatcher.find_plan_level0_basis(
dataset=args.dataset,
instrument=args.instrument,
obs_type=args.obs_type,
obs_group=args.obs_group,
obs_id=args.obs_id,
detector=args.detector,
prc_status=args.prc_status,
)
print(f"{len(plan_basis)} plan basis, {len(data_basis)} data basis found")
# generate DAG group run
dag_group_run = BaseDAG.generate_dag_group_run(
dag_group="csst-msc-l1",
batch_id=args.batch_id,
priority=args.priority,
)
# generate DAG run list
dag_run_list = CSST_DAGS["csst-msc-l1-mbi"].schedule(
dag_group_run=dag_group_run,
plan_basis=plan_basis,
data_basis=data_basis,
pmapname=args.pmapname,
ref_cat=args.ref_cat,
)
# # submit DAG group run
# dfs.dag.new_dag_group_run(
# dag_group_run=dag_group_run,
# dag_run_list=dag_run_list,
# )
print("dag_group_run", dag_group_run)
print("dag_run_list", dag_run_list[0])
from csst_dag import CSST_DAGS
# dfs = DFS()
# os.environ
test_kwargs = dict(
dataset="csst-msc-c9-25sqdeg-v3",
instrument="MSC",
obs_type="WIDE",
obs_group="W1",
obs_id="10100100412",
proposal_id=None,
prc_status=-1024,
)
dag = CSST_DAGS.get("csst-msc-l1-mbi")
dag_group_run, dag_run_list = dag.run(
**test_kwargs,
extra_kwargs={"a": 1},
)
# currently defined DAGs
for dag in CSST_DAGS.keys():
print(dag)
# csst-msc-l1-qc0
# csst-msc-l1-mbi
# csst-msc-l1-ast
# csst-msc-l1-sls
# csst-msc-l1-ooc
# csst-mci-l1
# csst-mci-l1-qc0
# csst-ifs-l1
# csst-cpic-l1
# csst-cpic-l1-qc0
# csst-hstdm-l1
from csst_dag import DFS
import os

# dfs = DFS()
os.environ
test_kwargs = dict(
dataset="csst-msc-c9-25sqdeg-v3",
instrument="MSC",
obs_type="WIDE",
obs_group="W1",
# obs_id="10100232366",
proposal_id=None,
)
DFS.dfs1_find_plan(**test_kwargs)
DFS.dfs1_find_plan_basis(**test_kwargs)
DFS.dfs1_find_level0(**test_kwargs)
DFS.dfs1_find_level0_basis(**test_kwargs)
@@ -8,6 +8,13 @@ print(len(plan_basis), len(data_basis))
print("plan colnames: ", plan_basis.colnames)
print("data colnames: ", data_basis.colnames)

Dispatcher.dispatch_file(plan_basis, data_basis)
Dispatcher.dispatch_detector(plan_basis, data_basis)
Dispatcher.dispatch_obsid(plan_basis, data_basis)
Dispatcher.dispatch_obsgroup(plan_basis, data_basis)

# # 666 task/s
task_list_via_file = Dispatcher.dispatch_file(plan_basis, data_basis[:10])
t = Table(task_list_via_file)
...
from csst_dag import DFS, CsstDAGs

# dfs = DFS()

dags = CsstDAGs()

dag = dags.get("csst-msc-l1-qc0")
dag.dag_run_template
dag.params

dag = dags.get("csst-msc-l1-mbi")
dag.dag_run_template

dag = dags.get("csst-msc-l1-sls")
dag.dag_run_template

dag.schedule(
    dataset="csst-msc-c9-25sqdeg-v3",
    obs_type="WIDE",
    obs_group="W1",
    batch_id="default",
    initial_prc_status=-2,
    final_prc_status=-2,
    demo=False,
)

from csst_dag.dag.msc import CsstMscL1Mbi

dag = dags.get_dag("csst-msc-l1-mbi")