Skip to content

Codebase Reference

This document provides an overview of the main components of the grid_data_retrieval package, detailing the primary modules and their functionalities.

grid_data_retrieval.runner

Grid Data Retrieval Runner

Orchestrates grid data fetching from APIs.

This module handles ONLY data retrieval. Subsequent processing (gap-filling, resampling, timezone conversion) should be done via the data_cleaning_and_joining module.

run_grid_retrieval

run_grid_retrieval(config, *, logger=None, verbose=True)

Execute grid data retrieval from API.

This function ONLY fetches and combines data. Processing happens elsewhere.

Parameters:

Name Type Description Default
config dict

Configuration dictionary containing: - start_date : str (YYYY-MM-DD HH:MM:SS) - end_date : str (YYYY-MM-DD HH:MM:SS) - api_url : str (optional) - overwrite_existing : bool (optional, default: False) - combine_files : bool (optional, default: True)

required
logger Logger

Pre-configured logger instance.

None
verbose bool

Whether to echo logs to console.

True

Returns:

Type Description
int

Exit code: 0=success, 1=error.

Source code in packages/grid_data_retrieval/src/grid_data_retrieval/runner.py
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
def run_grid_retrieval(
    config: dict,
    *,
    logger=None,
    verbose: bool = True,
) -> int:
    """
    Execute grid data retrieval from API.

    This function ONLY fetches and combines data. Processing happens elsewhere
    (see the data_cleaning_and_joining module).

    Parameters
    ----------
    config : dict
        Configuration dictionary containing:
        - start_date : str (YYYY-MM-DD HH:MM:SS)
        - end_date : str (YYYY-MM-DD HH:MM:SS)
        - api_url : str (optional)
        - overwrite_existing : bool (optional, default: False — existing
          monthly files are skipped unless explicitly overridden)
        - combine_files : bool (optional, default: True)
        - output_dir : str or None (optional; overrides the default
          data_dir()/OUTPUT_BASE_DIR location)
    logger : logging.Logger, optional
        Pre-configured logger instance. Created on demand when omitted.
    verbose : bool, optional
        Whether to echo logs to console.

    Returns
    -------
    int
        Exit code: 0=success, 1=error.
    """
    # Initialize logger if not provided
    if logger is None:
        package_log_dir = log_dir(create=True) / "grid_data_retrieval"
        logger = setup_logger(str(package_log_dir), verbose=verbose)
        log_msg(f"Logging initialized at {package_log_dir}", logger, echo_console=verbose, force=True)

    log_msg("=" * 60, logger, echo_console=verbose)
    log_msg(f"Starting Grid Data Retrieval at {datetime.now().isoformat()}",
            logger=logger, echo_console=verbose, force=True)
    log_msg("=" * 60, logger, echo_console=verbose)

    try:
        # Extract config
        start_date = config["start_date"]
        end_date = config["end_date"]
        api_url = config.get("api_url", DEFAULT_API_URL)
        # Default False: skip already-downloaded months (matches the CLI default).
        overwrite_existing = config.get("overwrite_existing", False)
        combine_files = config.get("combine_files", True)

        # Define output directories
        # Users can override via config["output_dir"], otherwise use defaults from constants above
        if "output_dir" in config and config["output_dir"] is not None:
            base_output_dir = Path(config["output_dir"])
        else:
            # Use constants defined at top of file
            base_output_dir = data_dir(create=True) / OUTPUT_BASE_DIR

        monthly_dir = base_output_dir / MONTHLY_SUBDIR
        combined_dir = base_output_dir

        # Step 1: Fetch monthly batches
        log_msg("\n" + "=" * 60, logger, echo_console=verbose)
        log_msg("Fetching Monthly Batches from API", logger, echo_console=verbose, force=True)
        log_msg("=" * 60, logger, echo_console=verbose)

        monthly_files = fetch_monthly_batches(
            start_date=start_date,
            end_date=end_date,
            api_url=api_url,
            output_dir=monthly_dir,
            overwrite_existing=overwrite_existing,
            logger=logger,
            echo_console=verbose,
        )

        # An empty list means there was nothing to fetch; treated as success, not error.
        if not monthly_files:
            log_msg("No monthly files to process. Exiting.", logger, level="warning", echo_console=verbose, force=True)
            return 0

        log_msg(f"Fetched {len(monthly_files)} monthly file(s).", logger, echo_console=verbose, force=True)

        # Step 2: Optionally combine monthly files
        if combine_files:
            log_msg("\n" + "=" * 60, logger, echo_console=verbose)
            log_msg("Combining Monthly Files", logger, echo_console=verbose, force=True)
            log_msg("=" * 60, logger, echo_console=verbose)

            combined_path = combine_monthly_files(
                monthly_dir=monthly_dir,
                output_dir=combined_dir,
                logger=logger,
                echo_console=verbose,
            )

            log_msg(f"Combined file created: {combined_path}", logger, echo_console=verbose, force=True)
            final_output = combined_path
        else:
            final_output = monthly_dir

        # Success
        log_msg("\n" + "=" * 60, logger, echo_console=verbose)
        log_msg("Retrieval completed successfully!", logger, echo_console=verbose, force=True)
        log_msg(f"Raw data saved to: {final_output}", logger, echo_console=verbose, force=True)
        log_msg("=" * 60, logger, echo_console=verbose)
        log_msg("\nNext steps:", logger, echo_console=verbose, force=True)
        log_msg("  - Use data_cleaning_and_joining module for processing", logger, echo_console=verbose, force=True)
        log_msg("  - Apply gap-filling, resampling, timezone conversion as needed", logger, echo_console=verbose, force=True)

        return 0

    except Exception as e:
        log_msg(f"Retrieval failed with exception: {e}", logger=logger, level="exception", echo_console=verbose, force=True)
        return 1

grid_data_retrieval.io.cli

Command-Line Interface

CLI for grid data retrieval (fetching only).

Data processing should be done via the data_cleaning_and_joining module.

parse_args

parse_args()

Parse command-line arguments.

Returns:

Type Description
Namespace

Parsed arguments.

Source code in packages/grid_data_retrieval/src/grid_data_retrieval/io/cli.py
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
def parse_args() -> argparse.Namespace:
    """
    Parse command-line arguments for the grid data retrieval CLI.

    Returns
    -------
    argparse.Namespace
        Parsed arguments.
    """
    p = argparse.ArgumentParser(
        description="Grid Data Retrieval - Fetch electricity grid data from APIs.",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Using configuration file
  osme-grid-fetch --config configs/grid/my_config.json

  # Using command-line arguments
  osme-grid-fetch --start-date "2020-01-01 00:00:00" --end-date "2020-12-31 23:55:00"

  # Re-download existing files
  osme-grid-fetch --config my_config.json --overwrite-existing

  # Verbose mode
  osme-grid-fetch --config configs/grid/my_config.json --verbose

Note: This command only FETCHES data. For processing (resampling, timezone
conversion, gap-filling), use the data_cleaning_and_joining module.
        """
    )

    # A config file, when given, supersedes every other argument.
    p.add_argument("--config", type=str, default=None,
                   help="Path to JSON configuration file. If provided, other arguments are ignored.")

    # Manual parameters, honored only when --config is absent.
    p.add_argument("--start-date", type=str, default=DEFAULT_START_DATE,
                   help='Start date (format: "YYYY-MM-DD HH:MM:SS").')
    p.add_argument("--end-date", type=str, default=DEFAULT_END_DATE,
                   help='End date (format: "YYYY-MM-DD HH:MM:SS").')
    p.add_argument("--api-url", type=str, default=DEFAULT_API_URL,
                   help="API URL for grid data retrieval.")
    p.add_argument("--output-dir", type=str, default=None,
                   help='Output directory for data files (default: data/grid_data/raw/).')

    # Boolean toggles.
    p.add_argument("--overwrite-existing", action="store_true", dest="overwrite_existing",
                   help="Overwrite existing files if they already exist (default: skip existing).")
    p.add_argument("--no-combine", action="store_false", dest="combine_files",
                   help="Don't combine monthly files into single dataset (default: combine files).")

    # Verbosity controls.
    p.add_argument("--verbose", action="store_true", default=False,
                   help="Enable verbose console output.")
    p.add_argument("--quiet", action="store_true", default=False,
                   help="Suppress console output (logs still written to file).")

    # Explicit defaults for the boolean flags (redundant with the store_true /
    # store_false actions above, kept for clarity).
    p.set_defaults(overwrite_existing=False, combine_files=True)

    return p.parse_args()

main

main()

Main entry point for the grid data retrieval CLI.

This function: 1. Parses CLI arguments or loads configuration file. 2. Initializes logging. 3. Executes the grid data fetching.

Automatically invoked by the osme-grid-fetch CLI script.

Source code in packages/grid_data_retrieval/src/grid_data_retrieval/io/cli.py
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
def main():
    """
    Entry point for the ``osme-grid-fetch`` CLI.

    Parses CLI arguments (or loads a JSON configuration file), initializes
    logging, then delegates to ``run_grid_retrieval``. Exits with the
    retrieval's exit code (0 on success, 1 on error).
    """
    args = parse_args()

    # Console verbosity: --quiet wins, then --verbose; otherwise default to
    # verbose only for interactive (non-config-file) runs.
    verbose = not args.quiet and (args.verbose or args.config is None)

    # Initialize logger
    package_log_dir = log_dir(create=True) / "grid_data_retrieval"
    logger = setup_logger(str(package_log_dir), verbose=verbose)

    try:
        if args.config:
            # Config file takes precedence over individual CLI arguments.
            log_msg(f"Loading configuration from: {args.config}", logger, echo_console=verbose, force=True)
            config = load_config(args.config)
        else:
            # Assemble an equivalent config dict from the CLI arguments.
            config = {
                "start_date": args.start_date,
                "end_date": args.end_date,
                "api_url": args.api_url,
                "output_dir": args.output_dir,
                "overwrite_existing": args.overwrite_existing,  # False unless --overwrite-existing
                "combine_files": args.combine_files,  # True unless --no-combine
            }

        # SystemExit is a BaseException, so the except below does not swallow it.
        sys.exit(run_grid_retrieval(config=config, logger=logger, verbose=verbose))

    except Exception as e:
        log_msg(f"Critical error: {e}", logger, level="exception", echo_console=True, force=True)
        sys.exit(1)

grid_data_retrieval.io.config_loader

Configuration File Loading

Load and validate JSON configuration files for grid data retrieval.

load_config

load_config(file_path)

Load configuration from a JSON file.

Searches in: 1. Absolute path (if provided) 2. Relative to config_dir() 3. Relative to config_dir()/grid/

Parameters:

Name Type Description Default
file_path str or Path

Path to JSON config file.

required

Returns:

Type Description
dict

Configuration dictionary.

Raises:

Type Description
FileNotFoundError

If the configuration file cannot be found.

Source code in packages/grid_data_retrieval/src/grid_data_retrieval/io/config_loader.py
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
def load_config(file_path: str | Path) -> dict:
    """
    Load a JSON configuration file for grid data retrieval.

    The path is resolved via ``_resolve_config_path``, which searches:
    1. The absolute path (if provided),
    2. Relative to ``config_dir()``,
    3. Relative to ``config_dir()/grid/``.

    Parameters
    ----------
    file_path : str or Path
        Path to the JSON config file.

    Returns
    -------
    dict
        Parsed configuration dictionary.

    Raises
    ------
    FileNotFoundError
        If the configuration file cannot be found.
    """
    resolved = Path(_resolve_config_path(file_path))
    return json.loads(resolved.read_text(encoding="utf-8"))

grid_data_retrieval.sources.carbontracker

CarbonTracker Merit API Data Retrieval

Fetches grid electricity data from the CarbonTracker Merit API and saves monthly Parquet files.

Functions:

Name Description
fetch_monthly_batches : Retrieve data for all months in date range
combine_monthly_files : Merge monthly files into single dataset

fetch_monthly_batches

fetch_monthly_batches(
    start_date,
    end_date,
    api_url,
    output_dir,
    *,
    overwrite_existing=True,
    logger=None,
    echo_console=True
)

Fetch grid data from CarbonTracker API in monthly batches.

Parameters:

Name Type Description Default
start_date str

Start date in format "YYYY-MM-DD HH:MM:SS".

required
end_date str

End date in format "YYYY-MM-DD HH:MM:SS".

required
api_url str

Base URL for the Merit API.

required
output_dir Path or str

Directory to save monthly Parquet files.

required
overwrite_existing bool

Whether to overwrite months that already have files.

True
logger Logger

Logger instance.

None
echo_console bool

Whether to echo to console.

True

Returns:

Type Description
List[Path]

List of paths to fetched/existing monthly files.

Source code in packages/grid_data_retrieval/src/grid_data_retrieval/sources/carbontracker.py
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
def fetch_monthly_batches(
    start_date: str,
    end_date: str,
    api_url: str,
    output_dir: Path | str,
    *,
    overwrite_existing: bool = True,
    logger=None,
    echo_console: bool = True,
) -> List[Path]:
    """
    Fetch grid data from CarbonTracker API in monthly batches.

    Each month in [start_date, end_date] is requested separately and saved as
    its own Parquet file. Months that fail (network or API errors) are logged
    and skipped; the run continues with the remaining months, so the returned
    list may be shorter than the number of months in range.

    Parameters
    ----------
    start_date : str
        Start date in format "YYYY-MM-DD HH:MM:SS".
    end_date : str
        End date in format "YYYY-MM-DD HH:MM:SS".
    api_url : str
        Base URL for the Merit API.
    output_dir : Path or str
        Directory to save monthly Parquet files (created if missing).
    overwrite_existing : bool, optional
        Whether to overwrite months that already have files. When False,
        existing files are kept and still included in the returned list.
    logger : logging.Logger, optional
        Logger instance.
    echo_console : bool, optional
        Whether to echo to console (also enables the progress bar).

    Returns
    -------
    List[Path]
        List of paths to fetched/existing monthly files.
    """
    output_dir = Path(output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)

    start_dt = datetime.strptime(start_date, "%Y-%m-%d %H:%M:%S")
    end_dt = datetime.strptime(end_date, "%Y-%m-%d %H:%M:%S")

    log_msg(f"Fetching monthly batches from {start_dt} to {end_dt}", logger, echo_console=echo_console)
    log_msg(f"API URL: {api_url}", logger, echo_console=echo_console)
    log_msg(f"Output directory: {output_dir}", logger, echo_console=echo_console)

    monthly_files = []
    months = list(_month_ranges(start_dt, end_dt))

    for month_start, month_end in tqdm(months, desc="Fetching monthly data", disable=not echo_console):
        file_path = _month_filename(output_dir, month_start)

        # Skip if exists (no API call, no rate-limit sleep for skipped months)
        if not overwrite_existing and file_path.exists():
            log_msg(f"Skipping existing: {file_path.name}", logger, echo_console=echo_console)
            monthly_files.append(file_path)
            continue

        # Prepare API request; keep ":" unescaped in the timestamp query values
        ms_str = url_escape(month_start.strftime("%Y-%m-%d %H:%M:%S"), safe=":")
        me_str = url_escape(month_end.strftime("%Y-%m-%d %H:%M:%S"), safe=":")

        url = (
            f"{api_url}"
            f"?start_time={ms_str}"
            f"&end_time={me_str}"
            f"&corrected_values=true"
        )

        try:
            response = requests.get(url, timeout=30)
            response.raise_for_status()
            # NOTE(review): json.loads on response.json() implies the API
            # returns a double-encoded JSON string; confirm against the API —
            # if it returns a plain object this raises TypeError.
            raw_json = json.loads(response.json())

            # Validate response shape before extracting data
            if "timeseries_values" not in raw_json:
                log_msg(
                    f"API error for {month_start:%Y-%m}: {raw_json}",
                    logger,
                    level="error",
                    echo_console=echo_console,
                    force=True
                )
                continue

            # Extract data: one row per timestamp, one column per variable
            values = raw_json["timeseries_values"]
            timestamps = values["timestamps"]

            rows = []
            for j in range(len(timestamps)):
                row = {"timestamp": timestamps[j]}
                for var in VARIABLES_TO_COLLECT:
                    row[var] = values[var][j]
                rows.append(row)

            # Create DataFrame with parsed datetime column
            df = pl.DataFrame(rows)
            df = df.with_columns(
                pl.col("timestamp").str.strptime(pl.Datetime, format="%Y-%m-%d %H:%M:%S")
            )

            # Save
            df.write_parquet(file_path)
            log_msg(f"Saved: {file_path.name}", logger, echo_console=echo_console)
            monthly_files.append(file_path)

        except requests.exceptions.RequestException as e:
            # Network-level failure for this month only; continue with the rest
            log_msg(
                f"Request error for {month_start:%Y-%m}: {e}",
                logger,
                level="error",
                echo_console=echo_console,
                force=True
            )
        except Exception as e:
            # Unexpected failure (bad payload shape, parquet write, ...) —
            # logged with traceback via level="exception", then continue
            log_msg(
                f"Unexpected error for {month_start:%Y-%m}: {e}",
                logger,
                level="exception",
                echo_console=echo_console,
                force=True
            )

        # Rate limiting between API calls
        time.sleep(API_DELAY_SECONDS)

    return monthly_files

combine_monthly_files

combine_monthly_files(
    monthly_dir,
    output_dir,
    *,
    logger=None,
    echo_console=True
)

Combine all monthly Parquet files into a single dataset.

Parameters:

Name Type Description Default
monthly_dir Path or str

Directory containing monthly Parquet files.

required
output_dir Path or str

Directory for combined output file.

required
logger Logger

Logger instance.

None
echo_console bool

Whether to echo to console.

True

Returns:

Type Description
Path

Path to the combined output file.

Source code in packages/grid_data_retrieval/src/grid_data_retrieval/sources/carbontracker.py
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
def combine_monthly_files(
    monthly_dir: Path | str,
    output_dir: Path | str,
    *,
    logger=None,
    echo_console: bool = True,
) -> Path:
    """
    Combine all monthly Parquet files into a single dataset.

    Files matching ``carbontracker_grid-data_*.parquet`` in `monthly_dir` are
    concatenated lazily, sorted by timestamp, and streamed to one Parquet file
    named after the min/max month in the combined data.

    Parameters
    ----------
    monthly_dir : Path or str
        Directory containing monthly Parquet files.
    output_dir : Path or str
        Directory for combined output file (created if missing).
    logger : logging.Logger, optional
        Logger instance.
    echo_console : bool, optional
        Whether to echo to console.

    Returns
    -------
    Path
        Path to the combined output file.

    Raises
    ------
    FileNotFoundError
        If no monthly Parquet files are found in `monthly_dir`.
    """
    monthly_dir = Path(monthly_dir)
    output_dir = Path(output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)

    # Find monthly files (sorted so concatenation order is deterministic)
    files = sorted(monthly_dir.glob("carbontracker_grid-data_*.parquet"))

    if not files:
        raise FileNotFoundError(f"No monthly Parquet files found in {monthly_dir}")

    log_msg(f"Found {len(files)} monthly file(s) to combine.", logger, echo_console=echo_console)

    # Load lazily; vertical_relaxed tolerates compatible schema differences
    lazy_frames = [pl.scan_parquet(f) for f in files]
    combined_lazy = pl.concat(lazy_frames, how="vertical_relaxed")

    # Get timestamp range for filename
    min_max = combined_lazy.select([
        pl.col("timestamp").min().alias("min_ts"),
        pl.col("timestamp").max().alias("max_ts"),
    ]).collect()

    min_ts = min_max["min_ts"][0]
    max_ts = min_max["max_ts"][0]

    start_str = min_ts.strftime("%Y-%m")
    end_str = max_ts.strftime("%Y-%m")

    # Construct output path
    output_path = output_dir / f"carbontracker_grid-data_{start_str}_{end_str}.parquet"

    # Fixed: previous message concatenated the two timestamps with no separator
    log_msg(f"Date range: {min_ts} to {max_ts}", logger, echo_console=echo_console)
    log_msg(f"Writing combined file: {output_path.name}", logger, echo_console=echo_console)

    # Write to Parquet via streaming sink (avoids materializing the full frame)
    combined_lazy.sort("timestamp").sink_parquet(output_path)

    # Fixed: dropped extraneous f-prefix on a string with no placeholders (F541)
    log_msg("Combined file created successfully.", logger, echo_console=echo_console, force=True)

    return output_path

grid_data_retrieval.utils.logging

Logging Infrastructure

Provides centralized logging for the grid_data_retrieval package.

Reuses patterns from osme_common and weather_data_retrieval for consistency.

setup_logger

setup_logger(save_dir=None, verbose=True)

Initialize and return a configured logger.

Logs are written to &lt;repo_root&gt;/logs/grid_data_retrieval (or $OSME_LOG_DIR/grid_data_retrieval).

Parameters:

Name Type Description Default
save_dir str or None

Directory to save log files. If None, defaults to osme_common.paths.log_dir().

None
verbose bool

Whether to echo logs to console (via console handler).

True

Returns:

Type Description
Logger

Configured logger instance.

Source code in packages/grid_data_retrieval/src/grid_data_retrieval/utils/logging.py
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
def setup_logger(
    save_dir: str | None = None,
    verbose: bool = True,
) -> logging.Logger:
    """
    Initialize and return a configured logger.

    Logs are written to <repo_root>/logs/grid_data_retrieval (or $OSME_LOG_DIR/grid_data_retrieval).

    Parameters
    ----------
    save_dir : str or None, optional
        Directory to save log files. If None, defaults to osme_common.paths.log_dir().
    verbose : bool, optional
        Currently unused: no console handler is attached here. Console echoing
        is performed per-message by ``log_msg`` (via its ``echo_console`` /
        ``force`` flags). The parameter is accepted for API symmetry with the
        rest of the package.

    Returns
    -------
    logging.Logger
        Configured logger instance named "grid_retrieval" with a single
        DEBUG-level file handler writing to a timestamped log file.
    """
    # Resolve log directory
    base_dir = Path(save_dir) if save_dir else log_dir(create=True)
    base_dir.mkdir(parents=True, exist_ok=True)

    # One log file per invocation, named by creation timestamp
    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    log_path = base_dir / f"grid_retrieval_{timestamp}.log"

    logger = logging.getLogger("grid_retrieval")
    logger.setLevel(logging.DEBUG)

    # Clear old handlers safely (avoids duplicate output on repeated setup)
    if logger.hasHandlers():
        for h in list(logger.handlers):
            logger.removeHandler(h)

    # File handler – DEBUG (captures everything)
    fh = logging.FileHandler(log_path)
    fh.setLevel(logging.DEBUG)
    fh.setFormatter(logging.Formatter("%(asctime)s | %(levelname)s | %(message)s"))
    logger.addHandler(fh)

    logger.info(f"Logging initialized at {log_path}")
    return logger

log_msg

log_msg(
    msg,
    logger,
    *,
    level="info",
    echo_console=False,
    force=False
)

Unified logging utility.

  • Always logs to file.
  • Optionally echoes to console via tqdm.write (non-blocking).

Parameters:

Name Type Description Default
msg str

Message to log.

required
logger Logger

Logger instance.

required
level str

Log level: "debug", "info", "warning", "error", "exception".

'info'
echo_console bool

Print to console when True.

False
force bool

Print to console regardless of echo_console (for critical messages).

False

Returns:

Type Description
None
Source code in packages/grid_data_retrieval/src/grid_data_retrieval/utils/logging.py
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
def log_msg(
    msg: str,
    logger,
    *,
    level: str = "info",
    echo_console: bool = False,
    force: bool = False,
) -> None:
    """
    Log *msg* to file and optionally echo it to the console.

    The message always goes to *logger* at the requested *level*; when
    *echo_console* or *force* is set it is additionally printed through
    ``tqdm.write`` so active progress bars are not disrupted.

    Parameters
    ----------
    msg : str
        Message to log.
    logger : logging.Logger
        Logger instance (required; a falsy value raises).
    level : str, optional
        Log level: "debug", "info", "warning", "error", "exception".
        Unknown level names fall back to "info".
    echo_console : bool, optional
        Print to console when True.
    force : bool, optional
        Print to console regardless of `echo_console` (for critical messages).

    Returns
    -------
    None

    Raises
    ------
    ValueError
        If *logger* is falsy.
    """
    if not logger:
        raise ValueError("Logger instance must be provided to log_msg().")

    # Resolve the logging method once; unknown levels degrade to info.
    emit = getattr(logger, level, logger.info)
    emit(msg)

    if echo_console or force:
        tqdm.write(s=msg)