Compare commits

...

37 Commits

Author SHA1 Message Date
goksu
210471bcc6
clean whitespace 2023-09-18 14:46:47 +03:00
goksu
f2c7d4bf89
last changes 2023-09-18 14:23:47 +03:00
goksu
a6c2517e68
doc 2023-09-18 09:32:17 +03:00
goksu
dc5a98f70d
html creator copied to objs while benchmark 2023-09-17 14:08:14 +03:00
goksu
3bbc050c31
page changes 2023-09-12 13:01:57 +03:00
goksu
267f1914d6
working fine 2023-08-30 18:15:08 +03:00
goksu
a13ad841db
timer fix 2023-08-29 10:25:11 +03:00
goksu
f3dfede62f
outliers 2023-08-18 17:42:53 +03:00
goksu
3553148174
detecting outliers 2023-08-18 02:04:38 +03:00
goksu
4bcd9711da
milli to micro 2023-08-07 15:11:28 +03:00
goksu
c3254394f3
warmup iteration instead time 2023-08-07 15:07:19 +03:00
goksu
d7371720b0
warmup 2023-08-03 19:08:57 +03:00
goksu
fae5e8c941
little fixes 2023-08-01 17:37:55 +03:00
goksu
5025ea6e49
SOLID python and bug fixes for others 2023-07-24 23:53:12 +03:00
goksu
f9775cf1ee
used ccraw 2023-07-24 22:24:33 +03:00
goksu
893d7ac6c2
compatible with other platforms 2023-07-24 10:14:46 +03:00
goksu
1fb442f6cf
compiling and linking works as demos 2023-07-20 14:51:58 +03:00
goksu
e9362ecc59
linking by gcc 2023-07-14 16:18:00 +03:00
goksu
9c7800659d
python format 2023-07-12 15:16:41 +03:00
goksu
16411c0af7
cumulative time per N iterations 2023-07-12 14:51:56 +03:00
goksu
954d7c1ca5
loading in baseline and benchmark 2023-07-12 01:18:21 +03:00
goksu
ceb8fab9ae
html dirs 2023-07-12 00:52:31 +03:00
goksu
1afa5619ba
html font names 2023-07-05 14:00:12 +03:00
goksu
e536bca36f
rm before creation 2023-07-05 12:59:59 +03:00
goksu
e3d8aee979
testing page redesigned 2023-07-04 16:48:48 +03:00
goksu
7c7322fb5e
green coloring fixed 2023-06-29 14:07:02 +03:00
goksu
d9cc4478a2
cleaner id, date and branch 2023-06-20 17:42:09 +03:00
goksu
cc1ad531be
parameters in use 2023-06-20 16:20:27 +03:00
goksu
36f0d7c80a
parameters and id in result page 2023-06-20 16:16:53 +03:00
goksu
46551f7ef8
-c 50 arg added 2023-06-19 22:15:55 +03:00
goksu
f5713fa136
makefile fixed 2023-06-16 23:22:07 +03:00
Ahmet Göksu
79011cfffd Delete benchmark.html 2023-06-12 14:51:18 +00:00
goksu
f95a2c8279
make integrated 2023-06-12 17:44:04 +03:00
goksu
93b8d39394
Revert "files optimized"
This reverts commit 0e87ae09a6.
2023-06-12 14:06:45 +03:00
goksu
0e87ae09a6
files optimized 2023-06-11 12:10:31 +03:00
goksu
b0712e9a84
.ds_store excluded from .gitignore 2023-06-01 16:17:36 +03:00
goksu
9dd15018c2
ftbench demo with seperate makefile 2023-05-25 13:46:16 +03:00
12 changed files with 2169 additions and 0 deletions

View File

@ -162,6 +162,11 @@ FT_COMPILE := $(CC) $(ANSIFLAGS) $(INCLUDE_FLAGS) $(FT_CFLAGS)
include $(TOP_DIR)/builds/exports.mk
# Include the `testing' rules file.
#
include $(TOP_DIR)/builds/testing.mk
# Initialize the list of objects.
#
OBJECTS_LIST :=

153
builds/testing.mk Normal file
View File

@ -0,0 +1,153 @@
# Define a few important variables.
#
# Paths to the ftbench source and the object/binary built from it.
# `$(SO)', `$E', `$A', `$I', `$T' and `$(COMPILER_SEP)' are supplied by
# FreeType's platform-specific build includes -- TODO confirm.
FTBENCH_DIR = $(TOP_DIR)/src/tools/ftbench
FTBENCH_SRC = $(FTBENCH_DIR)/ftbench.c
FTBENCH_OBJ = $(OBJ_DIR)/bench.$(SO)
FTBENCH_BIN = $(OBJ_DIR)/bench$E
INCLUDES = $(TOP_DIR)/include
FONTS = $(wildcard $(FTBENCH_DIR)/fonts/*.ttf)
# Define objects.
#
# Note the trailing slashes: the info/result paths below are formed by
# plain string concatenation with these directory values.
BASELINE_DIR = $(OBJ_DIR)/baseline/
BENCHMARK_DIR = $(OBJ_DIR)/benchmark/
BASELINE_INFO = $(BASELINE_DIR)info.txt
BENCHMARK_INFO = $(BENCHMARK_DIR)info.txt
HTMLCREATOR_SRC = $(FTBENCH_DIR)/src/tohtml.py
HTMLCREATOR = $(OBJ_DIR)/tohtml.py
HTMLFILE = $(OBJ_DIR)/benchmark.html
# Define flags by default
# (`-c 100' = 100 iterations per test, `-w 10' = 10 warm-up iterations;
# overridable from the command line, e.g. `make baseline FTBENCH_FLAG="-c 50"').
FTBENCH_FLAG ?= -c 100 -w 10
# Define test fonts all in the fonts folder.
# One result `.txt' file per font, named after the font file.
BASELINE = $(addprefix $(BASELINE_DIR), $(notdir $(FONTS:.ttf=.txt)))
BENCHMARK = $(addprefix $(BENCHMARK_DIR), $(notdir $(FONTS:.ttf=.txt)))
FT_INCLUDES := $(OBJ_BUILD) \
               $(INCLUDES)
# Recursively expanded on purpose: `INCLUDES' is redefined near the end
# of this variable section, and `COMPILE' must pick up that final value.
COMPILE = $(CC) $(ANSIFLAGS) \
          $(INCLUDES:%=$I%) \
          $(CFLAGS)
# Enable C99 for gcc to avoid warnings.
# Note that clang++ aborts with an error if we use `-std=C99',
# so check for `++' in $(CC) also.
ifneq ($(findstring -pedantic,$(COMPILE)),)
  ifeq ($(findstring ++,$(CC)),)
    COMPILE += -std=c99
  endif
endif
# The static FreeType library the benchmark links against.
FTLIB := $(LIB_DIR)/$(LIBRARY).$A
ifeq ($(PLATFORM),unix)
  # `LDFLAGS` comes from the `configure` script (via FreeType's
  # `builds/unix/unix-cc.mk`), holding all linker flags necessary to
  # link the FreeType library.
  LINK_CMD = $(LIBTOOL) --mode=link $(CCraw) \
             $(subst /,$(COMPILER_SEP),$(LDFLAGS))
  LINK_LIBS = $(subst /,$(COMPILER_SEP),$(FTLIB) $(EFENCE))
else
  LINK_CMD = $(CC) $(subst /,$(COMPILER_SEP),$(LDFLAGS))
  ifeq ($(PLATFORM),unixdev)
    # For the pure `make` call (without using `configure`) we have to add
    # all needed libraries manually.
    LINK_LIBS := $(subst /,$(COMPILER_SEP),$(FTLIB) $(EFENCE)) \
                 -lm -lrt -lz -lbz2 -lpthread
    LINK_LIBS += $(shell pkg-config --libs libpng)
    LINK_LIBS += $(shell pkg-config --libs harfbuzz)
    LINK_LIBS += $(shell pkg-config --libs libbrotlidec)
    LINK_LIBS += $(shell pkg-config --libs librsvg-2.0)
  else
    LINK_LIBS = $(subst /,$(COMPILER_SEP),$(FTLIB) $(EFENCE))
  endif
endif
# Only on Windows we might fall back on GDI+ for PNG saving
ifeq ($(OS),Windows_NT)
  LINK_LIBS += -lgdiplus
endif
####################################################################
#
# POSIX TERMIOS: Do not define if you use OLD U*ix like 4.2BSD.
#
# `$D' presumably expands to the compiler's define option (e.g. `-D'),
# set by FreeType's compiler config -- TODO confirm.
ifeq ($(PLATFORM),unix)
  EXTRAFLAGS = $DUNIX $DHAVE_POSIX_TERMIOS
endif
ifeq ($(PLATFORM),unixdev)
  EXTRAFLAGS = $DUNIX $DHAVE_POSIX_TERMIOS
endif
# NOTE(review): this redefinition shadows the `INCLUDES' path set at the
# top of this section; since `COMPILE' is recursively expanded, it uses
# this final value when a recipe runs.
INCLUDES := $(subst /,$(COMPILER_SEP),$(FT_INCLUDES))
# Create directories for baseline and benchmark results.
# (Two independent rules with the same recipe; each creates only `$@'.)
$(BASELINE_DIR) $(BENCHMARK_DIR):
	@mkdir -p $@

# Compile ftbench.c into an object file.
# `$T' is the compiler's "name object file" option from FreeType's
# compiler config; recipes must be TAB-indented (restored here).
$(FTBENCH_OBJ): $(FTBENCH_SRC)
	@$(COMPILE) $T$(subst /,$(COMPILER_SEP),$@ $<) $(EXTRAFLAGS)
	@echo "Object created."

# Link the ftbench binary from its single object file.
$(FTBENCH_BIN): $(FTBENCH_OBJ)
	@echo "Linking ftbench..."
	@$(LINK_CMD) $T$(subst /,$(COMPILER_SEP),$@ $<) $(LINK_LIBS)
	@echo "Built."

# Copy tohtml.py into the objs folder so the generated report can be
# rebuilt from there.
.PHONY: copy-html-script
copy-html-script:
	@cp $(HTMLCREATOR_SRC) $(OBJ_DIR)
	@echo "Copied tohtml.py to $(OBJ_DIR)"
# Create a baseline: run ftbench on every test font and store one result
# file per font, plus an info.txt describing the run (flags, commit ID,
# commit date, branch -- the order tohtml.py expects).
#
# NOTE: the original recipe used bash arrays (`fonts=(...)'), which fail
# when /bin/sh is dash; rewritten as a portable POSIX sh loop.
.PHONY: baseline
baseline: $(FTBENCH_BIN) $(BASELINE_DIR)
	@$(RM) $(BASELINE)
	@echo "Creating baseline..."
	@echo "$(FTBENCH_FLAG)" > $(BASELINE_INFO)
	@git -C $(TOP_DIR) rev-parse HEAD >> $(BASELINE_INFO)
	@git -C $(TOP_DIR) show -s --format=%ci HEAD >> $(BASELINE_INFO)
	@git -C $(TOP_DIR) rev-parse --abbrev-ref HEAD >> $(BASELINE_INFO)
	@total_fonts=$(words $(FONTS)); \
	step=0; \
	for font in $(FONTS); do \
	  step=$$((step + 1)); \
	  percent=$$((step * 100 / total_fonts)); \
	  printf "\nProcessing %d%%..." $$percent; \
	  $(FTBENCH_BIN) $(FTBENCH_FLAG) "$$font" \
	    > $(BASELINE_DIR)$$(basename $$font .ttf).txt; \
	done
	@echo "Baseline created."
# Benchmark and compare to baseline: re-run ftbench on every test font,
# then let tohtml.py build the HTML comparison report.
#
# NOTE: the original recipe used bash arrays (`fonts=(...)'), which fail
# when /bin/sh is dash; rewritten as a portable POSIX sh loop.
.PHONY: benchmark
benchmark: $(FTBENCH_BIN) $(BENCHMARK_DIR) copy-html-script
	@$(RM) $(BENCHMARK) $(HTMLFILE)
	@echo "Creating benchmark..."
	@echo "$(FTBENCH_FLAG)" > $(BENCHMARK_INFO)
	@git -C $(TOP_DIR) rev-parse HEAD >> $(BENCHMARK_INFO)
	@git -C $(TOP_DIR) show -s --format=%ci HEAD >> $(BENCHMARK_INFO)
	@git -C $(TOP_DIR) rev-parse --abbrev-ref HEAD >> $(BENCHMARK_INFO)
	@total_fonts=$(words $(FONTS)); \
	step=0; \
	for font in $(FONTS); do \
	  step=$$((step + 1)); \
	  percent=$$((step * 100 / total_fonts)); \
	  printf "\nProcessing %d%%..." $$percent; \
	  $(FTBENCH_BIN) $(FTBENCH_FLAG) "$$font" \
	    > $(BENCHMARK_DIR)$$(basename $$font .ttf).txt; \
	done
	@$(PYTHON) $(HTMLCREATOR) $(OBJ_DIR)
	@echo "Benchmark results created in file: $(HTMLFILE)"
# Remove every artefact the benchmark rules may have produced:
# the binary, its object, both result trees, the report, and the
# copied HTML-creator script.
.PHONY: clean-benchmark
clean-benchmark:
	@echo "Cleaning..."
	@$(RM) $(FTBENCH_BIN) $(FTBENCH_OBJ)
	@$(RM) -rf $(BASELINE_DIR) $(BENCHMARK_DIR) $(HTMLFILE) $(HTMLCREATOR)
	@echo "Cleaned"

45
src/tools/ftbench/README Normal file
View File

@ -0,0 +1,45 @@
ftbench
========
ftbench is a program designed to run FreeType benchmarks. It accepts various options and a font name to run specific tests on font rendering operations.
Each test may involve tasks such as:
. Initializing the library
. Opening the font file
. Loading and optionally rendering each glyph
. Comparing results with cached versions (if available)
. Configuring specific charmap indices, load flags, etc.
Usage is time-limited or can be explicitly set to use a maximum number of iterations per test.
Command line options
--------------------
-C Compare with cached version (if available).
-c N Use at most N iterations for each test (0 means time-limited).
-e E Set specific charmap index E.
-f L Use hex number L as load flags (see `FT_LOAD_XXX').
-H NAME Use PS hinting engine NAME (default is `adobe').
-I VER Use TT interpreter version VER (default is version 40).
-i I-J Forward or reverse range of glyph indices to use.
-l N Set LCD filter to N (default is 0: none).
-m M Set maximum cache size to M KiByte (default is 1024).
-p Preload font file in memory.
-r N Set render mode to N (default is 0: normal).
-s S Use S ppem as face size (default is 10ppem).
-t T Use at most T seconds per bench (default is 2).
-w N Use N iterations for warming up before each test.
-b tests Perform chosen tests (default is all).
-v Show version.
Compilation
-----------
make baseline To create a baseline for your benchmarks, use the `make baseline` command. This will compile the ftbench.c and create a set of baseline measurements in the objs/baseline/ directory.
make benchmark To run the benchmarks, use the `make benchmark` command. The results will be stored in the objs/benchmark/ directory. It will copy the tohtml.py script to objs/ and generate an HTML file.
make clean-benchmark To remove all generated benchmark files and clean the objs directory, use the `make clean-benchmark` command.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1,16 @@
#!/bin/bash
# This tool subsets fonts to a fixed Unicode range using pyftsubset.
# Define the Unicode range (printable ASCII).
unicodes="U+0021-007E"
# Without nullglob the loop would run once with the literal string
# "*.ttf" when no font files are present, making pyftsubset fail.
shopt -s nullglob
# Loop over all .ttf files in the current directory
for fontfile in *.ttf
do
    # Generate the output filename: foo.ttf -> foo_subset.ttf
    output="${fontfile%.ttf}_subset.ttf"
    # Run the pyftsubset command; quote every expansion so filenames
    # with spaces survive word splitting.
    pyftsubset "$fontfile" --unicodes="$unicodes" --output-file="$output"
done

1588
src/tools/ftbench/ftbench.c Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,44 @@
/*
 * This is a cheap replacement for getopt() because that routine is not
 * available on some platforms and behaves differently on other platforms.
 *
 * This code is hereby expressly placed in the public domain.
 * mleisher@crl.nmsu.edu (Mark Leisher)
 * 10 October 1997
 */
#ifndef MLGETOPT_H_
#define MLGETOPT_H_
/* On VMS, rename our symbols -- presumably to avoid clashing with the */
/* system library's own getopt()/optind/opterr/optarg; TODO confirm.   */
#ifdef VMS
#include <stdio.h>
#define getopt local_getopt
#define optind local_optind
#define opterr local_opterr
#define optarg local_optarg
#endif
#ifdef __cplusplus
extern "C" {
#endif
/* State variables mirroring the classic getopt() interface:          */
/* opterr -- error-reporting flag, optind -- index of the next argv   */
/* element, optarg -- argument of the current option (per POSIX       */
/* getopt conventions -- TODO confirm this clone matches exactly).    */
extern int opterr;
extern int optind;
extern char* optarg;
/* Prototype arguments are hidden from pre-ANSI compilers, which only */
/* accept an empty parameter list.                                    */
extern int getopt(
#ifdef __STDC__
int argc,
char* const* argv,
const char* pattern
#endif
);
#ifdef __cplusplus
}
#endif
#endif /* MLGETOPT_H_ */
/* End */

View File

@ -0,0 +1,318 @@
"""This script generates a HTML file from the results of ftbench"""
import os
import re
import sys
GITLAB_URL = "https://gitlab.freedesktop.org/freetype/freetype/-/commit/"
CSS_STYLE = """
<style>
table {
table-layout: fixed;
}
th, td {
padding: 3px;
text-align: center;
}
th {
background-color: #ccc;
color: black;
}
.warning{
color: red;
}
.col1 {
background-color: #eee;
}
.highlight {
background-color: #0a0;
}
</style>
"""
OBJ_DIR = sys.argv[1]
BASELINE_DIR = os.path.join(OBJ_DIR, "baseline")
BENCHMARK_DIR = os.path.join(OBJ_DIR, "benchmark")
BENCHMARK_HTML = os.path.join(OBJ_DIR, "benchmark.html")
FONT_COUNT = 5
WARNING_SAME_COMMIT = "Warning: Baseline and Benchmark have the same commit ID!"
INFO_1 = "* Average time for single iteration. Smaller values are better."
INFO_2 = "* If a value in the 'Iterations' column is given as '<i>x | y</i>', values <i>x</i> and <i>y</i> give the number of iterations in the baseline and the benchmark test, respectively."
def main():
    """Build benchmark.html from the baseline and benchmark result files."""
    with open(BENCHMARK_HTML, "w") as html_file:
        # Emit the fixed document skeleton first.
        for fragment in (
            "<html>\n<head>\n",
            CSS_STYLE,
            "</head>\n<body>\n",
            "<h1>Freetype Benchmark Results</h1>\n",
        ):
            write_to_html(html_file, fragment)
        baseline_info = parse_info_file(os.path.join(BASELINE_DIR, "info.txt"))
        benchmark_info = parse_info_file(os.path.join(BENCHMARK_DIR, "info.txt"))
        # Comparing a commit against itself is almost certainly a mistake.
        if baseline_info[1].strip() == benchmark_info[1].strip():
            write_to_html(
                html_file,
                f'<h2 class="warning">{WARNING_SAME_COMMIT}</h2>\n',
            )
        generate_info_table(html_file, baseline_info, benchmark_info)
        # Generate total results table
        generate_total_results_table(html_file, BASELINE_DIR, BENCHMARK_DIR)
        # One detailed table per font result file (info.txt is metadata).
        for filename in os.listdir(BASELINE_DIR):
            if filename.endswith(".txt") and filename != "info.txt":
                generate_results_table(
                    html_file,
                    read_file(os.path.join(BASELINE_DIR, filename)),
                    read_file(os.path.join(BENCHMARK_DIR, filename)),
                    filename,
                )
        write_to_html(html_file, "<center>Freetype Benchmark</center>\n")
        write_to_html(html_file, "</body>\n</html>\n")
def write_to_html(html_file, content):
    """Append the given markup string to the open report file handle."""
    html_file.write(content)
def read_file(file_path):
    """Return the file's contents as a list of lines (newlines kept)."""
    with open(file_path, "r") as handle:
        return handle.readlines()
def parse_info_file(info_file):
    """Return the info.txt lines with the commit ID (line 2) turned into
    an HTML link to that commit on GitLab; the link text is the first
    eight characters of the original line."""
    lines = read_file(info_file)
    commit_id = lines[1].strip()
    lines[1] = f'<a href="{GITLAB_URL}{commit_id}">{lines[1][:8]}</a>\n'
    return lines
def generate_info_table(html_file, baseline_info, benchmark_info):
    """Write the side-by-side run-info table plus the two footnotes."""
    write_to_html(html_file, "<h2>Info</h2>\n")
    write_to_html(html_file, '<table border="1">\n')
    write_to_html(
        html_file, "<tr><th>Info</th><th>Baseline</th><th>Benchmark</th></tr>\n"
    )
    # Row labels correspond, in order, to the lines written into info.txt.
    row_labels = ["Parameters", "Commit ID", "Commit Date", "Branch"]
    for label, base_line, bench_line in zip(
        row_labels, baseline_info, benchmark_info
    ):
        write_to_html(
            html_file,
            f'<tr><td class="col1">{label}</td><td>{base_line.strip()}</td><td>{bench_line.strip()}</td></tr>\n'
        )
    write_to_html(html_file, "</table><br/>")
    write_to_html(html_file, f"<p>{INFO_1}</p>")
    write_to_html(html_file, f"<p>{INFO_2}</p>")
def generate_total_results_table(html_file, baseline_dir, benchmark_dir):
    """Prepare total results table for html.

    Aggregates per-test timings across all font result files and writes
    one summary table comparing baseline vs. benchmark averages.
    """
    # This dictionary will store aggregated results, keyed by test name.
    test_results = {
        test: {"baseline": 0, "benchmark": 0, "n_baseline": 0, "n_benchmark": 0}
        for test in [
            "Load",
            "Load_Advances (Normal)",
            "Load_Advances (Fast)",
            "Load_Advances (Unscaled)",
            "Render",
            "Get_Glyph",
            "Get_Char_Index",
            "Iterate CMap",
            "New_Face",
            "Embolden",
            "Stroke",
            "Get_BBox",
            "Get_CBox",
            "New_Face & load glyph(s)",
        ]
    }
    total_time = 0
    for filename in os.listdir(baseline_dir):
        if filename.endswith(".txt") and not filename == "info.txt":
            baseline_results = read_file(os.path.join(baseline_dir, filename))
            benchmark_results = read_file(os.path.join(benchmark_dir, filename))
            for baseline_line, benchmark_line in zip(
                baseline_results, benchmark_results
            ):
                # "Total time: Ns" lines contribute to the overall duration.
                if baseline_line.startswith("Total time:"):
                    baseline_match = re.match(r"Total time: (\d+)s", baseline_line)
                    benchmark_match = re.match(r"Total time: (\d+)s", benchmark_line)
                    if baseline_match and benchmark_match:
                        total_time += int(baseline_match.group(1))
                        total_time += int(benchmark_match.group(1))
                # Indented lines hold per-test results, assumed to be of the
                # form "<name>  <time> microseconds  <count>" -- TODO confirm
                # against ftbench output.
                if baseline_line.startswith(" "):
                    baseline_match = re.match(
                        r"\s+(.*?)\s+(\d+\.\d+)\s+microseconds\s+(\d+)\s", baseline_line
                    )
                    benchmark_match = re.match(
                        r"\s+(.*?)\s+(\d+\.\d+)\s+microseconds\s+(\d+)\s",
                        benchmark_line,
                    )
                    if baseline_match and benchmark_match:
                        test = baseline_match.group(1).strip()
                        baseline_value = float(baseline_match.group(2))
                        benchmark_value = float(benchmark_match.group(2))
                        baseline_n = int(baseline_match.group(3))
                        benchmark_n = int(benchmark_match.group(3))
                        # Aggregate the results
                        if test in test_results:
                            test_results[test]["baseline"] += baseline_value
                            test_results[test]["benchmark"] += benchmark_value
                            test_results[test]["n_baseline"] += baseline_n
                            test_results[test]["n_benchmark"] += benchmark_n
    # Writing to HTML
    write_to_html(html_file, "<h2>Total Results</h2>\n")
    write_to_html(html_file, '<table border="1">\n')
    write_to_html(
        html_file,
        "<tr><th>Test</th><th>Iterations</th><th>* Baseline (&#181;s)</th>\
        <th>* Benchmark (&#181;s)</th><th>Difference (%)</th></tr>\n",
    )
    total_baseline = total_benchmark = total_n_baseline = total_n_benchmark = 0
    for test, values in test_results.items():
        # Average over the number of test fonts.
        # NOTE(review): FONT_COUNT is hard-coded to 5; it should equal the
        # actual number of result files -- confirm against the fonts dir.
        baseline = values["baseline"] / FONT_COUNT
        benchmark = values["benchmark"] / FONT_COUNT
        n_baseline = values["n_baseline"] / FONT_COUNT
        n_benchmark = values["n_benchmark"] / FONT_COUNT
        n_display = (
            f"{n_baseline:.0f} | {n_benchmark:.0f}"
            if n_baseline != n_benchmark
            else int(n_baseline)
        )
        # Guard against division by zero (a test with no baseline samples);
        # for baseline != 0 this matches the original formula exactly.
        diff = (
            ((baseline - benchmark) / baseline) * 100
            if baseline != 0
            else 0
        )
        # Calculate for total row
        total_baseline += baseline
        total_benchmark += benchmark
        total_n_baseline += n_baseline
        total_n_benchmark += n_benchmark
        # Check which value is smaller for color highlighting
        baseline_color = "highlight" if baseline <= benchmark else ""
        benchmark_color = "highlight" if benchmark <= baseline else ""
        write_to_html(
            html_file,
            f'<tr><td class="col1">{test}</td><td>{n_display}</td>\
            <td class="{baseline_color}">{baseline:.1f}</td>\
            <td class="{benchmark_color}">{benchmark:.1f}</td><td>{diff:.1f}</td></tr>\n',
        )
    # Close the total-duration row properly (</tr> was missing).
    write_to_html(
        html_file,
        f'<tr><td class="col1">Total duration for all tests:</td><td class="col1" colspan="4">{total_time:.0f} s</td></tr>',
    )
    write_to_html(html_file, "</table>\n")
def generate_results_table(html_file, baseline_results, benchmark_results, filename):
    """Prepare results table for html.

    Writes one per-font table comparing baseline and benchmark timings,
    parsed from the two ftbench result-file line lists.
    """
    # Font name is parsed from the "ftbench results for font ..." header;
    # assumes the path is quoted and drops the trailing two characters -- TODO
    # confirm against ftbench output format.
    fontname = [
        line.split("/")[-1].strip("'")[:-2]
        for line in baseline_results
        if line.startswith("ftbench results for font")
    ][0]
    # Fixed tag mismatch: heading previously opened <h3> but closed </h2>.
    write_to_html(html_file, f"<h3>Results for {fontname}</h3>\n")
    write_to_html(html_file, '<table border="1">\n')
    write_to_html(
        html_file,
        f'<tr><th>Test</th><th>Iterations</th>\
        <th>* <a href="{ os.path.join("./baseline/", filename[:-4])}.txt">Baseline</a> (&#181;s)</th>\
        <th>* <a href="{ os.path.join("./benchmark/", filename[:-4])}.txt">Benchmark</a> (&#181;s)</th>\
        <th>Difference (%)</th></tr>\n'
    )
    # total_n is accumulated but not currently displayed.
    total_n = total_time = 0
    for baseline_line, benchmark_line in zip(baseline_results, benchmark_results):
        # "Total time: Ns" lines contribute to the per-font duration row.
        if baseline_line.startswith("Total time:"):
            baseline_match = re.match(r"Total time: (\d+)s", baseline_line)
            benchmark_match = re.match(r"Total time: (\d+)s", benchmark_line)
            if baseline_match and benchmark_match:
                total_time += int(baseline_match.group(1))
                total_time += int(benchmark_match.group(1))
        # Indented lines hold per-test results: "<name>  <time> microseconds  <count>".
        if baseline_line.startswith(" "):
            baseline_match = re.match(
                r"\s+(.*?)\s+(\d+\.\d+)\s+microseconds\s+(\d+)\s", baseline_line
            )
            benchmark_match = re.match(
                r"\s+(.*?)\s+(\d+\.\d+)\s+microseconds\s+(\d+)\s", benchmark_line
            )
            if baseline_match and benchmark_match:
                baseline_value = float(baseline_match.group(2))
                benchmark_value = float(benchmark_match.group(2))
                # Guard against division by zero; for baseline_value != 0
                # this matches the original formula exactly.
                percentage_diff = (
                    ((baseline_value - benchmark_value) / baseline_value) * 100
                    if baseline_value != 0
                    else 0
                )
                baseline_n = baseline_match.group(3)
                benchmark_n = benchmark_match.group(3)
                # Show "x | y" only when the iteration counts differ.
                n = (
                    baseline_n
                    if baseline_n == benchmark_n
                    else baseline_n + " | " + benchmark_n
                )
                total_n += int(baseline_n)
                total_n += int(benchmark_n)
                # Check which value is smaller for color highlighting
                baseline_color = (
                    "highlight" if baseline_value <= benchmark_value else ""
                )
                benchmark_color = (
                    "highlight" if benchmark_value <= baseline_value else ""
                )
                write_to_html(
                    html_file,
                    f'<tr><td class="col1">{baseline_match.group(1)}</td><td>{n}</td>\
                    <td class="{baseline_color}">{baseline_value:.1f}</td><td class="{benchmark_color}">{benchmark_value:.1f}</td><td>{percentage_diff:.1f}</td></tr>\n',
                )
    # Close the total-duration row properly (</tr> was missing).
    write_to_html(
        html_file,
        f'<tr><td class="col1">Total duration for the font:</td><td class="col1" colspan="4">{total_time:.0f} s</td></tr></table>\n',
    )
# Run the report generator only when executed as a script (not on import).
if __name__ == "__main__":
    main()