
Commit 0f999c4

Author: DEKHTIARJonathan
[Benchmarking-Py] Release 1.1.0 - Replacing all print() calls by logging.<level>() calls
Parent: 95e097c

17 files changed (+651, -81 lines)

tftrt/benchmarking-python/CHANGELOG.md

Lines changed: 4 additions & 0 deletions
@@ -46,6 +46,10 @@ Description of the change
 
 <!-- YOU CAN EDIT FROM HERE -->
 
+## [1.1.0] - 2022.07.25 - @DEKHTIARJonathan
+
+Replacing all `print()` calls by `logging.<level>()` calls
+
 ## [1.0.1] - 2022.07.25 - @DEKHTIARJonathan
 
 Removing AutoTuning on `get_dequeue_batch_fn` because DALIDataset was not
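
As a rough sketch of the migration pattern this release applies across the suite (assuming the `benchmark_logger` module added in this commit is importable, e.g. when running from tftrt/benchmarking-python/):

from benchmark_logger import logging

# 1.0.x style: the severity tag was embedded in the printed message itself.
# print("[INFO] Benchmark arguments:")

# 1.1.0 style: the severity is carried by the logging call; the logger adds
# its own "[BENCH - v<version>] LEVEL:" prefix to every record.
logging.info("Benchmark arguments:")

# Debug-level messages are filtered out at the default INFO verbosity.
logging.debug("AutoTuning is over... Collecting timing statistics:")
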
Lines changed: 15 additions & 1 deletion
@@ -1,2 +1,16 @@
-#!/usr/bin/env python
+#! /usr/bin/python
 # -*- coding: utf-8 -*-
+
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

tftrt/benchmarking-python/benchmark_args.py

Lines changed: 5 additions & 1 deletion
@@ -13,6 +13,7 @@
 from tensorflow.python.saved_model.signature_constants import \
     DEFAULT_SERVING_SIGNATURE_DEF_KEY
 
+from benchmark_logger import logging
 from benchmark_utils import print_dict
 
 
@@ -392,6 +393,9 @@ def _post_process_args(self, args):
             # Let's fix it to 1 to save memory.
             args.total_max_samples = 1
 
+        if args.debug or args.debug_data_aggregation or args.debug_performance:
+            logging.set_verbosity(logging.DEBUG)
+
         return args
 
     def parse_args(self):
@@ -400,7 +404,7 @@ def parse_args(self):
         args = self._post_process_args(args)
         self._validate_args(args)
 
-        print("\nBenchmark arguments:")
+        logging.info("Benchmark arguments:")
         print_dict(vars(args))
         print()
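
The new block in `_post_process_args` means that passing any of the three debug flags lowers the logging threshold to DEBUG for the whole run. A minimal sketch of that behaviour, with a hypothetical stand-in parser (only the three flag names and the `set_verbosity` call come from the diff; the real flag definitions live elsewhere in benchmark_args.py):

import argparse

from benchmark_logger import logging

# Illustrative parser only; assumes the flags are plain boolean switches.
parser = argparse.ArgumentParser()
parser.add_argument("--debug", action="store_true")
parser.add_argument("--debug_data_aggregation", action="store_true")
parser.add_argument("--debug_performance", action="store_true")
args = parser.parse_args(["--debug_performance"])

# Same rule as the diff above: any debug flag lowers the threshold to DEBUG.
if args.debug or args.debug_data_aggregation or args.debug_performance:
    logging.set_verbosity(logging.DEBUG)

logging.debug("Now visible because a --debug* flag lowered the threshold.")
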

tftrt/benchmarking-python/benchmark_autotuner.py

Lines changed: 10 additions & 10 deletions
@@ -7,6 +7,7 @@
 import numpy as np
 import tensorflow as tf
 
+from benchmark_logger import logging
 from benchmark_utils import force_gpu_resync
 
 
@@ -35,21 +36,20 @@ def _autotune(self, *arg, **kwargs):
             output = self._fns[fn_id](*arg, **kwargs)
             self._timings[fn_id].append(time.time() - start_t)
         except IndexError:
-            print(
-                "\n[DEBUG] AutoTuning is over... Collecting timing statistics:"
-            )
+            print()  # visual spacing
+            logging.debug("AutoTuning is over... Collecting timing statistics:")
             perf_data = []
             for idx, fn_stat in enumerate(self._timings):
                 perf_data.append(np.mean(fn_stat[self._skip_n_first:]))
-                print(
-                    f"\t- [DEBUG] Function ID: {idx} - "
+                logging.debug(
+                    f"\t- Function ID: {idx} - "
                     f"Name: {self._fns[idx].__name__:40s} - "
                     f"Average Exec Time: {perf_data[-1]}"
                 )
 
             best_fn_id = np.argmin(perf_data)
-            print(
-                f"[DEBUG] Selecting function ID: {best_fn_id}. "
+            logging.debug(
+                f"Selecting function ID: {best_fn_id}. "
                 f"Setting exec path to: `{self._fns[best_fn_id].__name__}`\n"
             )
@@ -71,7 +71,7 @@ def _wrapper(*args, **kwargs):
         try:
             return context[0](*args, **kwargs)
         except IndexError:
-            print(f"[INFO] Building the concrete function")
+            logging.info(f"Building the concrete function")
             context.append(func.get_concrete_function(*args, **kwargs))
             return context[0](*args, **kwargs)
@@ -106,8 +106,8 @@ def resync_gpu_wrap_fn(_func, str_appended):
     funcs2autotune = [eager_function, tf_function]
 
     if use_synthetic_data:
-        print(
-            "[INFO] Allowing direct concrete_function call with "
+        logging.debug(
+            "Allowing direct concrete_function call with "
             "synthetic data loader."
         )
 
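
Since the autotuner messages above moved from unconditional `print()` calls to `logging.debug()`, they only show up when the logging threshold is at DEBUG, which per the benchmark_args.py change happens when one of the `--debug*` flags is set. A small sketch of that interaction, again assuming `benchmark_logger` is importable:

from benchmark_logger import logging

# Suppressed: the logger defaults to the INFO threshold.
logging.debug("Selecting function ID: 0. Setting exec path to: `eager_function`")

# Lower the threshold temporarily with the temp_verbosity() context manager
# (the --debug* flags lower it for the whole run instead).
with logging.temp_verbosity(logging.DEBUG):
    logging.debug("Selecting function ID: 0. Setting exec path to: `eager_function`")
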

tftrt/benchmarking-python/versioning_utils.py renamed to tftrt/benchmarking-python/benchmark_info.py

Lines changed: 5 additions & 0 deletions
@@ -7,6 +7,11 @@
 import subprocess
 import shlex
 
+# The `__version__` number shall be updated everytime core benchmarking files
+# are updated.
+# Please update CHANGELOG.md with a description of what this version changed.
+__version__ = "1.1.0"
+
 
 def get_commit_id():
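
The `__version__` constant introduced here is consumed by the new logger module: `StdOutFormatter` embeds it in the prefix of every record. A minimal sketch of that relationship, assuming both modules are importable:

from benchmark_info import __version__
from benchmark_logger import logging

# Each record is prefixed with the suite version taken from benchmark_info,
# e.g. "[BENCH - v1.1.0] INFO    : ...".
logging.info(f"Running benchmarking suite version {__version__}")
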

tftrt/benchmarking-python/benchmark_logger.py (new file)

Lines changed: 241 additions & 0 deletions
@@ -0,0 +1,241 @@
#! /usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys

import inspect
import warnings

from contextlib import contextmanager

from six import add_metaclass

import logging as _logging

from benchmark_info import __version__

from logging_utils.formatters import BaseFormatter
from logging_utils.metaclasses import SingletonMetaClass

__all__ = [
    'Logger',
]


class StdOutFormatter(BaseFormatter):
    DEFAULT_FORMAT = f"%(color)s[BENCH - v{__version__}] "
    DEFAULT_FORMAT += "%(levelname)-8s: %(end_color)s%(message)s"


@add_metaclass(SingletonMetaClass)
class Logger(object):

    # Level 0
    NOTSET = _logging.NOTSET

    # Level 10
    DEBUG = _logging.DEBUG

    # Level 20
    INFO = _logging.INFO

    # Level 30
    WARNING = _logging.WARNING

    # Level 40
    ERROR = _logging.ERROR

    # Level 50
    CRITICAL = _logging.CRITICAL

    _level_names = {
        0: 'NOTSET',
        10: 'DEBUG',
        20: 'INFO',
        30: 'WARNING',
        40: 'ERROR',
        50: 'CRITICAL',
    }

    def __init__(self, capture_io=True):

        self._logger = None

        self._handlers = dict()

        self._define_logger()

    def _define_logger(self):

        # Use double-checked locking to avoid taking lock unnecessarily.
        if self._logger is not None:
            return self._logger

        try:
            # Scope the TensorFlow logger to not conflict with users' loggers.
            self._logger = _logging.getLogger('benchmarking_suite')
            self.reset_stream_handler()

        finally:
            self.set_verbosity(verbosity_level=Logger.INFO)

        self._logger.propagate = False

    def reset_stream_handler(self):

        if self._logger is None:
            raise RuntimeError(
                "Impossible to set handlers if the Logger is not predefined"
            )

        # ======== Remove Handler if already existing ========

        try:
            self._logger.removeHandler(self._handlers["stream_stdout"])
        except KeyError:
            pass

        try:
            self._logger.removeHandler(self._handlers["stream_stderr"])
        except KeyError:
            pass

        # ================= Streaming Handler =================

        # Add the output handler.
        self._handlers["stream_stdout"] = _logging.StreamHandler(sys.stdout)
        self._handlers["stream_stdout"].addFilter(
            lambda record: record.levelno <= _logging.INFO
        )

        self._handlers["stream_stderr"] = _logging.StreamHandler(sys.stderr)
        self._handlers["stream_stderr"].addFilter(
            lambda record: record.levelno > _logging.INFO
        )

        Formatter = StdOutFormatter

        self._handlers["stream_stdout"].setFormatter(Formatter())
        self._logger.addHandler(self._handlers["stream_stdout"])

        try:
            self._handlers["stream_stderr"].setFormatter(Formatter())
            self._logger.addHandler(self._handlers["stream_stderr"])
        except KeyError:
            pass

    def get_verbosity(self):
        """Return how much logging output will be produced."""
        if self._logger is not None:
            return self._logger.getEffectiveLevel()

    def set_verbosity(self, verbosity_level):
        """Sets the threshold for what messages will be logged."""
        if self._logger is not None:
            self._logger.setLevel(verbosity_level)

            for handler in self._logger.handlers:
                handler.setLevel(verbosity_level)

    @contextmanager
    def temp_verbosity(self, verbosity_level):
        """Sets the a temporary threshold for what messages will be logged."""

        if self._logger is not None:

            old_verbosity = self.get_verbosity()

            try:
                self.set_verbosity(verbosity_level)
                yield

            finally:
                self.set_verbosity(old_verbosity)

        else:
            try:
                yield

            finally:
                pass

    def debug(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'DEBUG'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
        """
        if self._logger is not None:
            self._logger._log(Logger.DEBUG, msg, args, **kwargs)

    def info(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'INFO'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.info("Houston, we have a %s", "interesting problem", exc_info=1)
        """
        if self._logger is not None:
            self._logger._log(Logger.INFO, msg, args, **kwargs)

    def warning(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'WARNING'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)
        """
        if self._logger is not None:
            self._logger._log(Logger.WARNING, msg, args, **kwargs)

    def error(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'ERROR'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.error("Houston, we have a %s", "major problem", exc_info=1)
        """
        if self._logger is not None:
            self._logger._log(Logger.ERROR, msg, args, **kwargs)

    def critical(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'CRITICAL'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.critical("Houston, we have a %s", "major disaster", exc_info=1)
        """
        if self._logger is not None:
            self._logger._log(Logger.CRITICAL, msg, args, **kwargs)


# Necessary to catch the correct caller
_logging._srcfile = os.path.normcase(inspect.getfile(Logger.__class__))

logging = Logger()
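
For reference, a short usage sketch of the `Logger` singleton defined above, assuming the module is importable as `benchmark_logger` and that `SingletonMetaClass` enforces a single shared instance, as its name suggests: records at INFO and below go to stdout, records above INFO go to stderr, and DEBUG output only appears once the threshold is lowered.

from benchmark_logger import Logger, logging

# Both names should refer to the same object if the metaclass is a singleton.
same_instance = Logger() is logging

logging.info("Routed to stdout (level <= INFO).")
logging.warning("Routed to stderr (level > INFO).")

logging.debug("Suppressed at the default INFO threshold.")
logging.set_verbosity(logging.DEBUG)
logging.debug("Emitted now that the threshold is DEBUG.")
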
