From d9f64d35e469fd2e5b0bfa219d09f6bda04d1eb3 Mon Sep 17 00:00:00 2001
From: BO ZHANG
Date: Tue, 11 Oct 2022 10:53:30 +0800
Subject: [PATCH] added code preference

---
 doc/source/csst_common/example_interface.py  |  4 +-
 doc/source/index.rst                         |  2 +-
 .../example_joblib.py                        |  0
 .../example_multiprocessing.py               |  0
 .../preference.rst}                          | 55 +++++++++++++++++--
 5 files changed, 54 insertions(+), 7 deletions(-)
 rename doc/source/{packages => preference}/example_joblib.py (100%)
 rename doc/source/{packages => preference}/example_multiprocessing.py (100%)
 rename doc/source/{packages/packages.rst => preference/preference.rst} (65%)

diff --git a/doc/source/csst_common/example_interface.py b/doc/source/csst_common/example_interface.py
index cd3439a..e6d2275 100644
--- a/doc/source/csst_common/example_interface.py
+++ b/doc/source/csst_common/example_interface.py
@@ -98,7 +98,7 @@ def process_single_image(
 
 # process multiple images in an exposure (RECOMMENDED, at least for MBI or SLS)
 # define a single job
-def one_job(dm, detector):
+def one_job(dm: CsstMsDataManager, detector: int):
     """ Process a single image, defined for parallel processing. """
     filepath_input = dm.l0_detector(detector=detector)
     filepath_output = dm.l1_detector(detector=detector, post="L1_processed.fits")
@@ -108,7 +108,7 @@
     return
 
 
-# parallel processing jobs
+# process in serial / parallel
 def process_multiple_images(
     dm: CsstMsDataManager,
     logger: Union[None, logging.Logger] = None,
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 9ec4753..b546430 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -62,7 +62,7 @@ The guide for LSST developers
    vcs.rst
    packaging.rst
    codestyle.rst
-   packages/packages.rst
+   preference/preference.rst
    unittest.rst
    simulation.rst
    csst_common/csst_common.rst
diff --git a/doc/source/packages/example_joblib.py b/doc/source/preference/example_joblib.py
similarity index 100%
rename from doc/source/packages/example_joblib.py
rename to doc/source/preference/example_joblib.py
diff --git a/doc/source/packages/example_multiprocessing.py b/doc/source/preference/example_multiprocessing.py
similarity index 100%
rename from doc/source/packages/example_multiprocessing.py
rename to doc/source/preference/example_multiprocessing.py
diff --git a/doc/source/packages/packages.rst b/doc/source/preference/preference.rst
similarity index 65%
rename from doc/source/packages/packages.rst
rename to doc/source/preference/preference.rst
index f142530..284fab8 100644
--- a/doc/source/packages/packages.rst
+++ b/doc/source/preference/preference.rst
@@ -1,13 +1,13 @@
-Package preference
-==================
+Code Preference
+===============
 
 Initially we want our developers to following the
 `coding guidelines for astropy-affiliated packages `_
 as much as possible.
 A few important conventions and special cases should be outlined here.
 
-Basic preference
-----------------
+Package preference
+------------------
 
 Several packages are favored over others if they can be used to solve the problem under study.
 Developers should use them as much as possible.
@@ -66,3 +66,50 @@ The output is
 
 For parallel computing with inter-communication or distributed computing,
 we recommend developers to consider using ``mpi4py``: https://github.com/mpi4py/mpi4py.
+
+
+Global variables
+----------------
+
+Usage of ``global`` should be prohibited.
+In most cases, variables should be kept in their default scopes.
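+
+As a minimal illustrative sketch (the function and variable names below are hypothetical),
+prefer passing state explicitly instead of mutating module-level variables:
+
+.. code-block:: python
+
+    # hypothetical example (discouraged): relies on module-level state via ``global``
+    counter = 0
+
+    def increment_counter():
+        global counter
+        counter += 1
+
+    # preferred: keep state in the default (local) scope and pass it explicitly
+    def incremented(counter: int) -> int:
+        return counter + 1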
+
+Using ``subprocess``
+--------------------
+
+``subprocess.run()`` is favored over ``subprocess.Popen()`` and ``os.system()``, as suggested in the Python documentation:
+
+- The ``subprocess`` module allows you to spawn new processes, connect to their input/output/error pipes,
+  and obtain their return codes. This module intends to replace several older modules and functions:
+
+  .. code-block:: python
+
+      os.system
+      os.spawn*
+
+- The recommended approach to invoking subprocesses is to use the ``run()`` function for all use cases
+  it can handle. For more advanced use cases, the underlying ``Popen`` interface can be used directly.
+
+
+Numpy multithreading
+--------------------
+
+Numpy sometimes automatically uses multithreading. To see whether you are actually using OpenBLAS or MKL, use
+
+.. code-block:: python
+
+    import numpy
+
+    numpy.__config__.show()
+
+To turn off this feature, use
+
+.. code-block:: bash
+
+    export MKL_NUM_THREADS=1
+    export NUMEXPR_NUM_THREADS=1
+    export OMP_NUM_THREADS=1
+    export VECLIB_MAXIMUM_THREADS=1
+
+.. note::
+    In most cases, this automatic multithreading does not enhance performance in practice.
+    Therefore, the above setting will be used in the CSST pipeline globally.
+
--
GitLab