[QAAS]
timestamp = gmz12.benchmarkcenter.megware.com
app_name = llama.cpp
git_commit =
dataset_name = Llama-3.1-8B-Q8_0 PP=128 NPL=4
run_cmd = -m meta-llama-3.1-8b-instruct-Q8_0.gguf -t -b 2048 -ub 512 -npp 128 -ntg 0 -npl 4 -c 16384 --seed 0 --output-format jsonl
LANG = C/CXX

[REPORTS]
figure_of_merit_type = RATE
figure_of_merit_unit = tokens/s
compiler_default = aocc
multicompiler_report = qaas_compilers.csv
mpi_scaling = no
openmp_scaling = strong
scalability_report = qaas_multicore.csv
scalability_reference_line = aocc:2

[SYSTEM]
machine = gmz12.benchmarkcenter.megware.com
model_name = AMD EPYC 9655 96-Core Processor
ISA = x86_64
architecture = ZEN_V5
number_of_cpus = 384
number_of_cores = 192
number_of_sockets = 2
number_of_cores_per_socket = 96
number_of_numa_domains = 8
frequency_driver = acpi-cpufreq
frequency_governor = performance
scaling_max_frequency = 2600000
scaling_min_frequency = 1500000
advertized_frequency = unsupported
maximal_frequency = 4509375
huge_pages =
icx_version = 2025.2.0.20250806
aocc_version = 5.0.0
mpi_provider = OpenMPI
mpi_version = 5.0.8

[TIME]
initial_profile = 00H04M33S
build_binaries = 00H11M43S
multicompiler = 00H15M12S