Loop Id: 562 | Module: libqmcwfs.so | Source: MultiBsplineRef.hpp:276-286 | Coverage: 0.9% |
---|
0x4c1f0 VMULSD (%R9,%RDI,1),%XMM5,%XMM2 [1] |
0x4c1f6 VMULSD (%RBX,%RDI,1),%XMM10,%XMM8 [7] |
0x4c1fb VMOVSD %XMM2,(%R9,%RDI,1) [1] |
0x4c201 VMULSD (%R13,%RDI,1),%XMM4,%XMM11 [3] |
0x4c208 VMOVSD %XMM8,(%RBX,%RDI,1) [7] |
0x4c20d VMULSD (%R12,%RDI,1),%XMM9,%XMM1 [2] |
0x4c213 VMOVSD %XMM11,(%R13,%RDI,1) [3] |
0x4c21a VMULSD (%R8,%RDI,1),%XMM3,%XMM6 [4] |
0x4c220 VMOVSD %XMM1,(%R12,%RDI,1) [2] |
0x4c226 VMULSD (%R11,%RDI,1),%XMM7,%XMM0 [8] |
0x4c22c VMOVSD %XMM6,(%R8,%RDI,1) [4] |
0x4c232 VMULSD (%R14,%RDI,1),%XMM14,%XMM13 [9] |
0x4c238 VMOVSD %XMM0,(%R11,%RDI,1) [8] |
0x4c23e VMULSD 0x8(%RBX,%RDI,1),%XMM10,%XMM0 [7] |
0x4c244 VMOVSD %XMM13,(%R14,%RDI,1) [9] |
0x4c24a VMULSD (%R15,%RDI,1),%XMM12,%XMM8 [6] |
0x4c250 VMOVSD %XMM0,0x8(%RBX,%RDI,1) [7] |
0x4c256 VMULSD 0x8(%R12,%RDI,1),%XMM9,%XMM2 [2] |
0x4c25d VMOVSD %XMM8,(%R15,%RDI,1) [6] |
0x4c263 VMULSD (%R10,%RDI,1),%XMM15,%XMM1 [5] |
0x4c269 VMOVSD %XMM2,0x8(%R12,%RDI,1) [2] |
0x4c270 VMULSD 0x8(%R11,%RDI,1),%XMM7,%XMM11 [8] |
0x4c277 VMOVSD %XMM1,(%R10,%RDI,1) [5] |
0x4c27d VMULSD 0x8(%R9,%RDI,1),%XMM5,%XMM6 [1] |
0x4c284 VMOVSD %XMM11,0x8(%R11,%RDI,1) [8] |
0x4c28b VMOVSD %XMM6,0x8(%R9,%RDI,1) [1] |
0x4c292 VMULSD 0x8(%R13,%RDI,1),%XMM4,%XMM13 [3] |
0x4c299 VMOVSD %XMM13,0x8(%R13,%RDI,1) [3] |
0x4c2a0 VMULSD 0x8(%R8,%RDI,1),%XMM3,%XMM8 [4] |
0x4c2a7 VMOVSD %XMM8,0x8(%R8,%RDI,1) [4] |
0x4c2ae VMULSD 0x8(%R14,%RDI,1),%XMM14,%XMM1 [9] |
0x4c2b5 VMOVSD %XMM1,0x8(%R14,%RDI,1) [9] |
0x4c2bc VMULSD 0x8(%R15,%RDI,1),%XMM12,%XMM0 [6] |
0x4c2c3 VMOVSD %XMM0,0x8(%R15,%RDI,1) [6] |
0x4c2ca VMULSD 0x8(%R10,%RDI,1),%XMM15,%XMM2 [5] |
0x4c2d1 VMOVSD %XMM2,0x8(%R10,%RDI,1) [5] |
0x4c2d8 ADD $0x10,%RDI |
0x4c2dc CMP %RDI,%RSI |
0x4c2df JNE 4c1f0 |
/home/eoseret/qaas_runs_CPU_9468/171-145-9236/intel/miniqmc/build/miniqmc/src/Numerics/Spline2/MultiBsplineRef.hpp: 276 - 286 |
-------------------------------------------------------------------------------- |
276: for (int n = 0; n < num_splines; n++) |
277: { |
278: gx[n] *= dxInv; |
279: gy[n] *= dyInv; |
280: gz[n] *= dzInv; |
281: hxx[n] *= dxx; |
282: hyy[n] *= dyy; |
283: hzz[n] *= dzz; |
284: hxy[n] *= dxy; |
285: hxz[n] *= dxz; |
286: hyz[n] *= dyz; |
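The loop body above multiplies nine separate double arrays by scalar factors, and the assembly shows it compiled to scalar VMULSD/VMOVSD pairs (one element per instruction), which matches the 0% vectorization ratio and the 8.00x "fully vectorized" speedup estimate reported below. A minimal sketch of how this loop could be steered toward packed vector code follows; the function name, the `__restrict` qualifiers, and the OpenMP SIMD pragma are illustrative assumptions, not taken from the miniqmc sources, and they presume the nine arrays never overlap.

```cpp
// Hypothetical rewrite of the scaling loop at MultiBsplineRef.hpp:276-286 (a sketch,
// not the miniqmc implementation).
// Assumption: gx..hyz point to distinct, non-overlapping double arrays.
// With the aliasing promise (__restrict) and a SIMD hint, a compiler can emit
// packed vmulpd on ymm/zmm registers instead of the scalar vmulsd seen above.
inline void scale_gradients_hessians(double* __restrict gx, double* __restrict gy,
                                     double* __restrict gz, double* __restrict hxx,
                                     double* __restrict hyy, double* __restrict hzz,
                                     double* __restrict hxy, double* __restrict hxz,
                                     double* __restrict hyz, int num_splines,
                                     double dxInv, double dyInv, double dzInv,
                                     double dxx, double dyy, double dzz,
                                     double dxy, double dxz, double dyz)
{
#pragma omp simd
  for (int n = 0; n < num_splines; n++)
  {
    gx[n] *= dxInv;
    gy[n] *= dyInv;
    gz[n] *= dzInv;
    hxx[n] *= dxx;
    hyy[n] *= dyy;
    hzz[n] *= dzz;
    hxy[n] *= dxy;
    hxz[n] *= dxz;
    hyz[n] *= dyz;
  }
}
```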
Path / |
Metric | Value |
---|---|
CQA speedup if no scalar integer | 1.00 |
CQA speedup if FP arith vectorized | 1.04 |
CQA speedup if fully vectorized | 8.00 |
CQA speedup if no inter-iteration dependency | NA |
CQA speedup if next bottleneck killed | 1.04 |
Bottlenecks | micro-operation queue, |
Function | _ZN16miniqmcreference19MultiBsplineEvalRef12evaluate_vghIdEEvPKN11qmcplusplus14bspline_traitsIT_Lj3EE10SplineTypeES4_S4_S4_PS4_S9_S9_m |
Source | MultiBsplineRef.hpp:276-286 |
Source loop unroll info | not unrolled or unrolled with no peel/tail loop |
Source loop unroll confidence level | max |
Unroll/vectorization loop type | NA |
Unroll factor | NA |
CQA cycles | 9.33 |
CQA cycles if no scalar integer | 9.33 |
CQA cycles if FP arith vectorized | 9.00 |
CQA cycles if fully vectorized | 1.17 |
Front-end cycles | 9.33 |
DIV/SQRT cycles | 9.00 |
P0 cycles | 9.00 |
P1 cycles | 6.00 |
P2 cycles | 6.00 |
P3 cycles | 9.00 |
P4 cycles | 0.00 |
P5 cycles | 1.00 |
P6 cycles | 9.00 |
P7 cycles | 9.00 |
P8 cycles | 9.00 |
P9 cycles | 0.00 |
P10 cycles | 6.00 |
P11 cycles | 0.00 |
Inter-iter dependencies cycles | 1 |
FE+BE cycles (UFS) | 9.50 |
Stall cycles (UFS) | 0.00 |
Nb insns | 39.00 |
Nb uops | 38.00 |
Nb loads | 18.00 |
Nb stores | 18.00 |
Nb stack references | 0.00 |
FLOP/cycle | 1.93 |
Nb FLOP add-sub | 0.00 |
Nb FLOP mul | 18.00 |
Nb FLOP fma | 0.00 |
Nb FLOP div | 0.00 |
Nb FLOP rcp | 0.00 |
Nb FLOP sqrt | 0.00 |
Nb FLOP rsqrt | 0.00 |
Bytes/cycle | 30.86 |
Bytes prefetched | 0.00 |
Bytes loaded | 144.00 |
Bytes stored | 144.00 |
Stride 0 | 0.00 |
Stride 1 | 9.00 |
Stride n | 0.00 |
Stride unknown | 0.00 |
Stride indirect | 0.00 |
Vectorization ratio all | 0.00 |
Vectorization ratio load | 0.00 |
Vectorization ratio store | 0.00 |
Vectorization ratio mul | 0.00 |
Vectorization ratio add_sub | NA |
Vectorization ratio fma | NA |
Vectorization ratio div_sqrt | NA |
Vectorization ratio other | NA |
Vector-efficiency ratio all | 12.50 |
Vector-efficiency ratio load | 12.50 |
Vector-efficiency ratio store | 12.50 |
Vector-efficiency ratio mul | 12.50 |
Vector-efficiency ratio add_sub | NA |
Vector-efficiency ratio fma | NA |
Vector-efficiency ratio div_sqrt | NA |
Vector-efficiency ratio other | NA |
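The 12.50% vector-efficiency ratio and the 8.00x "fully vectorized" speedup estimate are two views of the same observation, assuming CQA is modelling a 512-bit vector target here (the loop uses only xmm registers for scalar double math): each instruction exercises one 64-bit lane out of the 512 bits a full-width register would provide.

```latex
\text{vector efficiency} \approx \frac{64\ \text{bits used (one double)}}{512\ \text{bits available}} = 12.5\%,
\qquad
\text{speedup if fully vectorized} \approx \frac{1}{0.125} = 8\times
```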
Path / |
Function | _ZN16miniqmcreference19MultiBsplineEvalRef12evaluate_vghIdEEvPKN11qmcplusplus14bspline_traitsIT_Lj3EE10SplineTypeES4_S4_S4_PS4_S9_S9_m |
Source file and lines | MultiBsplineRef.hpp:276-286 |
Module | libqmcwfs.so |
nb instructions | 39 |
nb uops | 38 |
loop length | 245 |
used x86 registers | 11 |
used mmx registers | 0 |
used xmm registers | 16 |
used ymm registers | 0 |
used zmm registers | 0 |
nb stack references | 0 |
micro-operation queue | 9.33 cycles |
front end | 9.33 cycles |
 | P0 | P1 | P2 | P3 | P4 | P5 | P6 | P7 | P8 | P9 | P10 | P11 |
---|---|---|---|---|---|---|---|---|---|---|---|---|
uops | 9.00 | 9.00 | 6.00 | 6.00 | 9.00 | 0.00 | 1.00 | 9.00 | 9.00 | 9.00 | 0.00 | 6.00 |
cycles | 9.00 | 9.00 | 6.00 | 6.00 | 9.00 | 0.00 | 1.00 | 9.00 | 9.00 | 9.00 | 0.00 | 6.00 |
Cycles executing div or sqrt instructions | NA |
Longest recurrence chain latency (RecMII) | 1.00 |
FE+BE cycles | 9.50 |
Stall cycles | 0.00 |
Front-end | 9.33 |
Dispatch | 9.00 |
Data deps. | 1.00 |
Overall L1 | 9.33 |
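Reading the four summary rows above together, the overall L1 estimate appears to be the largest of the component bounds (an assumption about how the tool combines them, but consistent with the micro-operation queue being flagged as the bottleneck):

```latex
\text{Overall L1} = \max(\text{Front-end},\ \text{Dispatch},\ \text{Data deps.})
                  = \max(9.33,\ 9.00,\ 1.00) = 9.33\ \text{cycles per iteration}
```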
Vectorization ratios |
all | 0% |
load | 0% |
store | 0% |
mul | 0% |
add-sub | NA (no add-sub vectorizable/vectorized instructions) |
fma | NA (no fma vectorizable/vectorized instructions) |
div/sqrt | NA (no div/sqrt vectorizable/vectorized instructions) |
other | NA (no other vectorizable/vectorized instructions) |
Vector-efficiency ratios |
all | 12% |
load | 12% |
store | 12% |
mul | 12% |
add-sub | NA (no add-sub vectorizable/vectorized instructions) |
fma | NA (no fma vectorizable/vectorized instructions) |
div/sqrt | NA (no div/sqrt vectorizable/vectorized instructions) |
other | NA (no other vectorizable/vectorized instructions) |
Instruction | Nb FU | P0 | P1 | P2 | P3 | P4 | P5 | P6 | P7 | P8 | P9 | P10 | P11 | Latency | Recip. throughput |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
VMULSD (%R9,%RDI,1),%XMM5,%XMM2 | 1 | 0.50 | 0.50 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VMULSD (%RBX,%RDI,1),%XMM10,%XMM8 | 1 | 0.50 | 0.50 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VMOVSD %XMM2,(%R9,%RDI,1) | 1 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0.50 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 |
VMULSD (%R13,%RDI,1),%XMM4,%XMM11 | 1 | 0.50 | 0.50 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VMOVSD %XMM8,(%RBX,%RDI,1) | 1 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0.50 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 |
VMULSD (%R12,%RDI,1),%XMM9,%XMM1 | 1 | 0.50 | 0.50 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VMOVSD %XMM11,(%R13,%RDI,1) | 1 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0.50 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 |
VMULSD (%R8,%RDI,1),%XMM3,%XMM6 | 1 | 0.50 | 0.50 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VMOVSD %XMM1,(%R12,%RDI,1) | 1 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0.50 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 |
VMULSD (%R11,%RDI,1),%XMM7,%XMM0 | 1 | 0.50 | 0.50 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VMOVSD %XMM6,(%R8,%RDI,1) | 1 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0.50 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 |
VMULSD (%R14,%RDI,1),%XMM14,%XMM13 | 1 | 0.50 | 0.50 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VMOVSD %XMM0,(%R11,%RDI,1) | 1 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0.50 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 |
VMULSD 0x8(%RBX,%RDI,1),%XMM10,%XMM0 | 1 | 0.50 | 0.50 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VMOVSD %XMM13,(%R14,%RDI,1) | 1 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0.50 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 |
VMULSD (%R15,%RDI,1),%XMM12,%XMM8 | 1 | 0.50 | 0.50 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VMOVSD %XMM0,0x8(%RBX,%RDI,1) | 1 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0.50 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 |
VMULSD 0x8(%R12,%RDI,1),%XMM9,%XMM2 | 1 | 0.50 | 0.50 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VMOVSD %XMM8,(%R15,%RDI,1) | 1 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0.50 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 |
VMULSD (%R10,%RDI,1),%XMM15,%XMM1 | 1 | 0.50 | 0.50 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VMOVSD %XMM2,0x8(%R12,%RDI,1) | 1 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0.50 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 |
VMULSD 0x8(%R11,%RDI,1),%XMM7,%XMM11 | 1 | 0.50 | 0.50 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VMOVSD %XMM1,(%R10,%RDI,1) | 1 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0.50 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 |
VMULSD 0x8(%R9,%RDI,1),%XMM5,%XMM6 | 1 | 0.50 | 0.50 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VMOVSD %XMM11,0x8(%R11,%RDI,1) | 1 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0.50 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 |
VMOVSD %XMM6,0x8(%R9,%RDI,1) | 1 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0.50 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 |
VMULSD 0x8(%R13,%RDI,1),%XMM4,%XMM13 | 1 | 0.50 | 0.50 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VMOVSD %XMM13,0x8(%R13,%RDI,1) | 1 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0.50 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 |
VMULSD 0x8(%R8,%RDI,1),%XMM3,%XMM8 | 1 | 0.50 | 0.50 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VMOVSD %XMM8,0x8(%R8,%RDI,1) | 1 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0.50 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 |
VMULSD 0x8(%R14,%RDI,1),%XMM14,%XMM1 | 1 | 0.50 | 0.50 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VMOVSD %XMM1,0x8(%R14,%RDI,1) | 1 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0.50 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 |
VMULSD 0x8(%R15,%RDI,1),%XMM12,%XMM0 | 1 | 0.50 | 0.50 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VMOVSD %XMM0,0x8(%R15,%RDI,1) | 1 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0.50 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 |
VMULSD 0x8(%R10,%RDI,1),%XMM15,%XMM2 | 1 | 0.50 | 0.50 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VMOVSD %XMM2,0x8(%R10,%RDI,1) | 1 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0.50 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 |
ADD $0x10,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.17 |
CMP %RDI,%RSI | 1 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0 | 1 | 0.20 |
JNE 4c1f0 <_ZN16miniqmcreference19MultiBsplineEvalRef12evaluate_vghIdEEvPKN11qmcplusplus14bspline_traitsIT_Lj3EE10SplineTypeES4_S4_S4_PS4_S9_S9_m+0x730> | 1 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 |
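One way to read the per-instruction table: `ADD $0x10,%RDI` advances the index by 16 bytes (two doubles) per binary iteration, so each pass covers two source iterations, i.e. 9 arrays x 2 elements = 18 scalar multiplies and 18 scalar stores. At the listed reciprocal throughput of 0.50 cycles each, that work alone accounts for roughly the 9.00-cycle dispatch bound (the exact port assignment below is the tool's model, not a measurement):

```latex
18\ \text{VMULSD} \times 0.50\ \text{cycles} = 9\ \text{cycles of multiply-port work},
\qquad
18\ \text{VMOVSD stores} \times 0.50\ \text{cycles} = 9\ \text{cycles of store-port work}
```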