# Standard library imports.
import subprocess
from typing import List

# Local library imports.
from benchmarks.eval_rate_benchmark import EvalRateBenchmark
from benchmarks.license_plate_benchmark import LicensePlateBenchmark
from benchmarks.call_audio_benchmark import CallAudioBenchmark

class LlavaBenchmark:
    """
    LLaVA benchmark class for processing media and storing results.

    Attributes:
        benchmarks (list): List of benchmark instances.
    """

    def __init__(self, benchmarks: List):
        """
        Initialize a LlavaBenchmark instance.

        Args:
            benchmarks (list): List of benchmark instances.
        """
        self.benchmarks = benchmarks
    def store_results(self, benchmark_result: subprocess.CompletedProcess, model: str, media_file_path: str) -> None:
        """
        Store the benchmark results.

        Args:
            benchmark_result (subprocess.CompletedProcess): Result of the subprocess.run call.
            model (str): The name of the model to use.
            media_file_path (str): The media file path.
        """
        # Dispatch the result to each benchmark type it applies to.
        for benchmark in self.benchmarks:
            if isinstance(benchmark, EvalRateBenchmark):
                benchmark.process_eval_rate(benchmark_result)
                benchmark.store_eval_rate(media_file_path)
            elif isinstance(benchmark, LicensePlateBenchmark):
                benchmark.store_license_plate(model, benchmark_result)
            elif isinstance(benchmark, CallAudioBenchmark):
                benchmark.store_transcript(model)
                benchmark.store_call_notes(model, benchmark_result)
                benchmark.print_call_notes()
    def average_and_plot_benchmarks(self) -> None:
        """
        Calculate the average evaluation rates and plot them.
        """
        for benchmark in self.benchmarks:
            if isinstance(benchmark, EvalRateBenchmark):
                benchmark.average_rate()
                benchmark.eval_rate_plotter.plot(benchmark.eval_rates)
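

# A minimal usage sketch, not part of the module: it assumes the three
# benchmark classes take no constructor arguments and that the model is
# invoked through an external CLI. The "ollama run llava" command and the
# media path below are hypothetical placeholders, not confirmed interfaces.
if __name__ == "__main__":
    benchmarks = [
        EvalRateBenchmark(),
        LicensePlateBenchmark(),
        CallAudioBenchmark(),
    ]
    llava_benchmark = LlavaBenchmark(benchmarks)

    media_file_path = "media/sample.jpg"  # hypothetical media file
    # Run the model as a subprocess and capture its output; store_results
    # expects the resulting subprocess.CompletedProcess.
    result = subprocess.run(
        ["ollama", "run", "llava", f"Describe {media_file_path}"],
        capture_output=True,
        text=True,
    )
    llava_benchmark.store_results(result, model="llava", media_file_path=media_file_path)
    llava_benchmark.average_and_plot_benchmarks()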