Loop Id: 375 | Module: exec | Source: BsplineFunctor.h:246-260 [...] | Coverage: 0.1% |
---|
0x4203b0 VMOVUPD 0x320(%RSP),%YMM6 [14] |
0x4203b9 VMULPD (%RDX,%RSI,8),%YMM6,%YMM6 [15] |
0x4203be VCVTTPD2DQ %YMM6,%XMM7 |
0x4203c2 VROUNDPD $0xb,%YMM6,%YMM8 |
0x4203c8 VSUBPD %YMM8,%YMM6,%YMM6 |
0x4203cd VPMOVSXDQ %XMM7,%YMM7 |
0x4203d2 VPSLLQ $0x3,%YMM7,%YMM7 |
0x4203d7 VMOVQ %RAX,%XMM8 |
0x4203dc VPBROADCASTQ %XMM8,%YMM8 |
0x4203e1 VPADDQ %YMM7,%YMM8,%YMM7 |
0x4203e5 VMOVQ %XMM7,%RDI |
0x4203ea VEXTRACTI128 $0x1,%YMM7,%XMM8 |
0x4203f0 VMOVQ %XMM8,%R8 |
0x4203f5 VPEXTRQ $0x1,%XMM7,%RBX |
0x4203fb VPEXTRQ $0x1,%XMM8,%R14 |
0x420401 VMOVSD (%RDI),%XMM8 [9] |
0x420405 VMOVSD (%R8),%XMM9 [6] |
0x42040a VPADDQ %YMM7,%YMM14,%YMM10 |
0x42040e VMOVQ %XMM10,%RDI |
0x420413 VMOVHPD (%RBX),%XMM8,%XMM8 [18] |
0x420417 VPEXTRQ $0x1,%XMM10,%R8 |
0x42041d VMOVHPD (%R14),%XMM9,%XMM9 [11] |
0x420422 VEXTRACTI128 $0x1,%YMM10,%XMM10 |
0x420428 VPEXTRQ $0x1,%XMM10,%RBX |
0x42042e VINSERTF128 $0x1,%XMM9,%YMM8,%YMM8 |
0x420434 VMOVQ %XMM10,%R14 |
0x420439 VMOVSD (%RDI),%XMM9 [5] |
0x42043d VPADDQ %YMM7,%YMM13,%YMM10 |
0x420441 VMOVSD (%R14),%XMM11 [4] |
0x420446 VPEXTRQ $0x1,%XMM10,%RDI |
0x42044c VMOVHPD (%R8),%XMM9,%XMM9 [2] |
0x420451 VMOVQ %XMM10,%R8 |
0x420456 VEXTRACTI128 $0x1,%YMM10,%XMM10 |
0x42045c VMOVHPD (%RBX),%XMM11,%XMM11 [17] |
0x420460 VMOVQ %XMM10,%RBX |
0x420465 VPEXTRQ $0x1,%XMM10,%R14 |
0x42046b VINSERTF128 $0x1,%XMM11,%YMM9,%YMM10 |
0x420471 VMOVSD (%RBX),%XMM9 [3] |
0x420475 VMOVSD (%R8),%XMM11 [7] |
0x42047a VMOVHPD (%R14),%XMM9,%XMM9 [10] |
0x42047f VMOVHPD (%RDI),%XMM11,%XMM11 [13] |
0x420483 VINSERTF128 $0x1,%XMM9,%YMM11,%YMM9 |
0x420489 VPADDQ %YMM7,%YMM12,%YMM7 |
0x42048d VMOVQ %XMM7,%R8 |
0x420492 VPEXTRQ $0x1,%XMM7,%RBX |
0x420498 VEXTRACTI128 $0x1,%YMM7,%XMM7 |
0x42049e VMOVQ %XMM7,%R14 |
0x4204a3 VPEXTRQ $0x1,%XMM7,%RDI |
0x4204a9 VMOVAPD %YMM6,%YMM7 |
0x4204ad VMOVUPD 0x2e0(%RSP),%YMM11 [14] |
0x4204b6 VFMADD132PD 0x300(%RSP),%YMM11,%YMM7 [14] |
0x4204c0 VFMADD213PD 0x2c0(%RSP),%YMM6,%YMM7 [14] |
0x4204ca VFMADD213PD 0x2a0(%RSP),%YMM6,%YMM7 [14] |
0x4204d4 VFMADD213PD %YMM5,%YMM8,%YMM7 |
0x4204d9 VMOVAPD %YMM6,%YMM5 |
0x4204dd VMOVSD (%R14),%XMM8 [8] |
0x4204e2 VMOVUPD 0x260(%RSP),%YMM11 [14] |
0x4204eb VFMADD132PD 0x280(%RSP),%YMM11,%YMM5 [14] |
0x4204f5 VFMADD213PD 0x240(%RSP),%YMM6,%YMM5 [14] |
0x4204ff VFMADD213PD 0x220(%RSP),%YMM6,%YMM5 [14] |
0x420509 VFMADD213PD %YMM7,%YMM10,%YMM5 |
0x42050e VMOVSD (%R8),%XMM7 [1] |
0x420513 VMOVAPD %YMM6,%YMM10 |
0x420517 VMOVUPD 0x1e0(%RSP),%YMM11 [14] |
0x420520 VFMADD132PD 0x200(%RSP),%YMM11,%YMM10 [14] |
0x42052a VMOVHPD (%RBX),%XMM7,%XMM7 [16] |
0x42052e VFMADD213PD %YMM2,%YMM6,%YMM10 |
0x420533 VFMADD213PD %YMM15,%YMM6,%YMM10 |
0x420538 VMOVHPD (%RDI),%XMM8,%XMM8 [12] |
0x42053c VFMADD213PD %YMM5,%YMM9,%YMM10 |
0x420541 VINSERTF128 $0x1,%XMM8,%YMM7,%YMM7 |
0x420547 VMOVAPD %YMM6,%YMM5 |
0x42054b VFMADD213PD %YMM0,%YMM3,%YMM5 |
0x420550 VFMADD213PD %YMM1,%YMM6,%YMM5 |
0x420555 VFMADD213PD %YMM4,%YMM6,%YMM5 |
0x42055a VFMADD213PD %YMM10,%YMM7,%YMM5 |
0x42055f ADD $0x4,%RSI |
0x420563 CMP %R11,%RSI |
0x420566 JB 4203b0 |
/scratch_na/users/xoserete/qaas_runs/171-417-8059/intel/miniqmc/build/miniqmc/src/QMCWaveFunctions/Jastrow/BsplineFunctor.h: 246 - 260 |
-------------------------------------------------------------------------------- |
246: for (int jat = 0; jat < iCount; jat++) |
247: { |
248: real_type r = distArrayCompressed[jat]; |
249: r *= DeltaRInv; |
250: int i = (int)r; |
251: real_type t = r - real_type(i); |
252: real_type tp0 = t * t * t; |
253: real_type tp1 = t * t; |
254: real_type tp2 = t; |
255: |
256: real_type d1 = SplineCoefs[i + 0] * (A[0] * tp0 + A[1] * tp1 + A[2] * tp2 + A[3]); |
257: real_type d2 = SplineCoefs[i + 1] * (A[4] * tp0 + A[5] * tp1 + A[6] * tp2 + A[7]); |
258: real_type d3 = SplineCoefs[i + 2] * (A[8] * tp0 + A[9] * tp1 + A[10] * tp2 + A[11]); |
259: real_type d4 = SplineCoefs[i + 3] * (A[12] * tp0 + A[13] * tp1 + A[14] * tp2 + A[15]); |
260: d += (d1 + d2 + d3 + d4); |
/usr/lib/gcc/x86_64-redhat-linux/8/../../../../include/c++/8/bits/stl_vector.h: 951 - 951 |
-------------------------------------------------------------------------------- |
951: return *(this->_M_impl._M_start + __n); |
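The source lines above compile to the unrolled-by-4 AVX2 loop shown in the disassembly: `r *= DeltaRInv` becomes the VMULPD, `(int)r` the VCVTTPD2DQ, `t = r - real_type(i)` the VROUNDPD/VSUBPD pair, and each `SplineCoefs[i + k]` access is assembled lane by lane with VMOVSD/VMOVHPD/VINSERTF128 before feeding the VFMADD chains. Below is a minimal standalone C++ sketch of the same computation; the data in main() is a hypothetical placeholder and A[] is written here as the standard uniform cubic B-spline basis matrix, so this illustrates the kernel rather than reproducing the miniQMC implementation.

```cpp
// Minimal standalone sketch of what the hot loop computes per compressed distance.
#include <cstdio>
#include <vector>

using real_type = double;

// Row k holds the t^3, t^2, t, 1 coefficients of cubic B-spline basis function B_k;
// with unit spline coefficients the four terms sum to 1 (partition of unity).
static const real_type A[16] = {
    -1.0 / 6.0,  3.0 / 6.0, -3.0 / 6.0, 1.0 / 6.0,
     3.0 / 6.0, -6.0 / 6.0,  0.0 / 6.0, 4.0 / 6.0,
    -3.0 / 6.0,  3.0 / 6.0,  3.0 / 6.0, 1.0 / 6.0,
     1.0 / 6.0,  0.0 / 6.0,  0.0 / 6.0, 0.0 / 6.0};

real_type evaluate_sum(const std::vector<real_type>& distArrayCompressed,
                       const std::vector<real_type>& SplineCoefs,
                       real_type DeltaRInv)
{
  real_type d = 0.0;
  for (std::size_t jat = 0; jat < distArrayCompressed.size(); jat++)
  {
    real_type r   = distArrayCompressed[jat] * DeltaRInv; // VMULPD
    int i         = static_cast<int>(r);                  // VCVTTPD2DQ
    real_type t   = r - real_type(i);                     // VROUNDPD + VSUBPD
    real_type tp0 = t * t * t, tp1 = t * t, tp2 = t;
    // Four indirect SplineCoefs[i + k] loads (the scalar VMOVSD/VMOVHPD gathers),
    // each scaled by a cubic polynomial in t (the VFMADD chains).
    real_type d1 = SplineCoefs[i + 0] * (A[0] * tp0 + A[1] * tp1 + A[2] * tp2 + A[3]);
    real_type d2 = SplineCoefs[i + 1] * (A[4] * tp0 + A[5] * tp1 + A[6] * tp2 + A[7]);
    real_type d3 = SplineCoefs[i + 2] * (A[8] * tp0 + A[9] * tp1 + A[10] * tp2 + A[11]);
    real_type d4 = SplineCoefs[i + 3] * (A[12] * tp0 + A[13] * tp1 + A[14] * tp2 + A[15]);
    d += (d1 + d2 + d3 + d4);
  }
  return d;
}

int main()
{
  // Hypothetical data: 8 distances, 16 grid coefficients set to 1, DeltaRInv = 2.
  // Because the basis functions sum to 1, the expected result is 8.
  std::vector<real_type> dist = {0.1, 0.4, 0.7, 1.3, 1.9, 2.2, 2.8, 3.1};
  std::vector<real_type> coefs(16, 1.0);
  std::printf("d = %g\n", evaluate_sum(dist, coefs, 2.0));
  return 0;
}
```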
Coverage (%) | Name | Source Location | Module |
---|---|---|---|
100.00+ | qmcplusplus::WaveFunction::eva[...] | stl_vector.h:806 | exec |
| qmcplusplus::NonLocalPP<double[...] | NonLocalPP.hpp:135 | exec |
| main.extracted.110 | NewTimer.h:249 | exec |
| __kmp_invoke_microtask | libiomp5.so | |
| __kmp_invoke_task_func | libiomp5.so | |
Path / |
Metric | Value |
---|---|
CQA speedup if no scalar integer | 1.00 |
CQA speedup if FP arith vectorized | 1.23 |
CQA speedup if fully vectorized | 3.33 |
CQA speedup if no inter-iteration dependency | NA |
CQA speedup if next bottleneck killed | 1.57 |
Bottlenecks | P0, P1, P5 |
Function | miniqmcreference::TwoBodyJastrowRef |
Source | BsplineFunctor.h:246-260,stl_vector.h:951-951 |
Source loop unroll info | unrolled by 4 |
Source loop unroll confidence level | medium |
Unroll/vectorization loop type | main |
Unroll factor | 4 |
CQA cycles | 23.33 |
CQA cycles if no scalar integer | 23.33 |
CQA cycles if FP arith vectorized | 19.00 |
CQA cycles if fully vectorized | 7.00 |
Front-end cycles | 14.83 |
DIV/SQRT cycles | 23.33 |
P0 cycles | 23.33 |
P1 cycles | 9.33 |
P2 cycles | 9.33 |
P3 cycles | 0.00 |
P4 cycles | 23.33 |
P5 cycles | 1.00 |
P6 cycles | 0.00 |
P7 cycles | 0.00 |
P8 cycles | 0.00 |
P9 cycles | 0.00 |
P10 cycles | 9.33 |
P11 cycles | 0.00 |
Inter-iter dependencies cycles | 16 |
FE+BE cycles (UFS) | 33.47 - 33.44 |
Stall cycles (UFS) | 18.32 - 18.33 |
Nb insns | 79.00 |
Nb uops | 88.00 |
Nb loads | 28.00 |
Nb stores | 0.00 |
Nb stack references | 11.00 |
FLOP/cycle | 5.83 |
Nb FLOP add-sub | 4.00 |
Nb FLOP mul | 4.00 |
Nb FLOP fma | 64.00 |
Nb FLOP div | 0.00 |
Nb FLOP rcp | 0.00 |
Nb FLOP sqrt | 0.00 |
Nb FLOP rsqrt | 0.00 |
Bytes/cycle | 21.94 |
Bytes prefetched | 0.00 |
Bytes loaded | 512.00 |
Bytes stored | 0.00 |
Stride 0 | 1.00 |
Stride 1 | 1.00 |
Stride n | 0.00 |
Stride unknown | 0.00 |
Stride indirect | 4.00 |
Vectorization ratio all | 55.26 |
Vectorization ratio load | 42.86 |
Vectorization ratio store | NA |
Vectorization ratio mul | 100.00 |
Vectorization ratio add_sub | 100.00 |
Vectorization ratio fma | 100.00 |
Vectorization ratio div_sqrt | NA |
Vectorization ratio other | 47.06 |
Vector-efficiency ratio all | 30.26 |
Vector-efficiency ratio load | 28.57 |
Vector-efficiency ratio store | NA |
Vector-efficiency ratio mul | 50.00 |
Vector-efficiency ratio add_sub | 50.00 |
Vector-efficiency ratio fma | 50.00 |
Vector-efficiency ratio div_sqrt | NA |
Vector-efficiency ratio other | 23.53 |
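The "Stride indirect" count of 4 and the ~43% load vectorization ratio above reflect the four SplineCoefs[i + k] streams: each lane computes its own index i, so the compiler assembles every 256-bit coefficient vector from scalar VMOVSD/VMOVHPD loads plus VINSERTF128. The sketch below is only an illustration of how the same access pattern can be expressed with an AVX2 hardware gather intrinsic; the helper names (gather_coefs, indices_and_t) are hypothetical, and a VGATHER-based version is not guaranteed to beat the emitted insert sequence, so it would have to be measured.

```cpp
// Hedged sketch: one of the four SplineCoefs[i + k] streams written with an AVX2
// gather instead of the scalar VMOVSD/VMOVHPD/VINSERTF128 sequence shown in the
// disassembly. Compile with AVX2 enabled (e.g. -mavx2); wiring is illustrative.
#include <immintrin.h>

// Gathers SplineCoefs[i + k] for the four lanes whose 32-bit indices are in 'idx'.
static inline __m256d gather_coefs(const double* SplineCoefs, __m128i idx, int k)
{
  const __m128i idx_k = _mm_add_epi32(idx, _mm_set1_epi32(k)); // i + k per lane
  return _mm256_i32gather_pd(SplineCoefs, idx_k, 8);           // scale = sizeof(double)
}

// Per 4-wide chunk: r = dist * DeltaRInv, i = trunc(r), t = r - i, as in the loop body.
static inline void indices_and_t(const double* dist, double DeltaRInv,
                                 __m128i* idx, __m256d* t)
{
  const __m256d r   = _mm256_mul_pd(_mm256_loadu_pd(dist), _mm256_set1_pd(DeltaRInv));
  const __m128i i32 = _mm256_cvttpd_epi32(r);                  // VCVTTPD2DQ
  *idx = i32;
  *t   = _mm256_sub_pd(r, _mm256_cvtepi32_pd(i32));            // r - trunc(r)
}
```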
Path / |
Function | miniqmcreference::TwoBodyJastrowRef |
Source file and lines | BsplineFunctor.h:246-260 |
Module | exec |
nb instructions | 79 |
nb uops | 88 |
loop length | 444 |
used x86 registers | 9 |
used mmx registers | 0 |
used xmm registers | 5 |
used ymm registers | 16 |
used zmm registers | 0 |
nb stack references | 11 |
ADD-SUB / MUL ratio | 1.00 |
micro-operation queue | 14.83 cycles |
front end | 14.83 cycles |
 | P0 | P1 | P2 | P3 | P4 | P5 | P6 | P7 | P8 | P9 | P10 | P11 |
---|---|---|---|---|---|---|---|---|---|---|---|---|
uops | 23.33 | 23.33 | 9.33 | 9.33 | 0.00 | 23.33 | 1.00 | 0.00 | 0.00 | 0.00 | 0.00 | 9.33 |
cycles | 23.33 | 23.33 | 9.33 | 9.33 | 0.00 | 23.33 | 1.00 | 0.00 | 0.00 | 0.00 | 0.00 | 9.33 |
Cycles executing div or sqrt instructions | NA |
Longest recurrence chain latency (RecMII) | 16.00 |
FE+BE cycles | 33.47-33.44 |
Stall cycles | 18.32-18.33 |
LM full (events) | 22.47-22.39 |
Front-end | 14.83 |
Dispatch | 23.33 |
Data deps. | 16.00 |
Overall L1 | 23.33 |
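The 16-cycle recurrence reported above (RecMII, "Data deps.") matches the loop-carried chain visible in the disassembly: the running sum (d in the source) is threaded through four dependent 4-cycle FMAs per iteration. A common way to shorten such a chain is to keep independent partial sums and combine them after the loop; the sketch below illustrates the idea on stand-in arrays (coef0..3 and poly0..3 are hypothetical names, not miniQMC data). Whether a compiler may perform this reassociation automatically depends on its floating-point settings, so treat this as an illustration rather than a drop-in change.

```cpp
// Hedged illustration of accumulator splitting for a loop-carried FMA chain.
#include <cstddef>

double reduce_split(const double* coef0, const double* coef1,
                    const double* coef2, const double* coef3,
                    const double* poly0, const double* poly1,
                    const double* poly2, const double* poly3,
                    std::size_t n)
{
  // Four independent running sums: each iteration appends one FMA to each chain,
  // so the loop-carried dependency is a single 4-cycle FMA instead of four in series.
  double s0 = 0.0, s1 = 0.0, s2 = 0.0, s3 = 0.0;
  for (std::size_t j = 0; j < n; ++j)
  {
    s0 += coef0[j] * poly0[j];
    s1 += coef1[j] * poly1[j];
    s2 += coef2[j] * poly2[j];
    s3 += coef3[j] * poly3[j];
  }
  // One combine outside the loop reproduces d += (d1 + d2 + d3 + d4).
  return (s0 + s1) + (s2 + s3);
}
```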
Vectorization ratios (%) - INT
all | 35% |
load | NA (no load vectorizable/vectorized instructions) |
store | NA (no store vectorizable/vectorized instructions) |
mul | NA (no mul vectorizable/vectorized instructions) |
add-sub | 100% |
fma | NA (no fma vectorizable/vectorized instructions) |
other | 25% |
Vectorization ratios (%) - FP
all | 66% |
load | 42% |
store | NA (no store vectorizable/vectorized instructions) |
mul | 100% |
add-sub | 100% |
fma | 100% |
div/sqrt | NA (no div/sqrt vectorizable/vectorized instructions) |
other | 100% |
Vectorization ratios (%) - INT+FP
all | 55% |
load | 42% |
store | NA (no store vectorizable/vectorized instructions) |
mul | 100% |
add-sub | 100% |
fma | 100% |
div/sqrt | NA (no div/sqrt vectorizable/vectorized instructions) |
other | 47% |
Vector efficiency ratios (%) - INT
all | 21% |
load | NA (no load vectorizable/vectorized instructions) |
store | NA (no store vectorizable/vectorized instructions) |
mul | NA (no mul vectorizable/vectorized instructions) |
add-sub | 50% |
fma | NA (no fma vectorizable/vectorized instructions) |
other | 16% |
Vector efficiency ratios (%) - FP
all | 35% |
load | 28% |
store | NA (no store vectorizable/vectorized instructions) |
mul | 50% |
add-sub | 50% |
fma | 50% |
div/sqrt | NA (no div/sqrt vectorizable/vectorized instructions) |
other | 40% |
Vector efficiency ratios (%) - INT+FP
all | 30% |
load | 28% |
store | NA (no store vectorizable/vectorized instructions) |
mul | 50% |
add-sub | 50% |
fma | 50% |
div/sqrt | NA (no div/sqrt vectorizable/vectorized instructions) |
other | 23% |
Instruction | Nb FU | P0 | P1 | P2 | P3 | P4 | P5 | P6 | P7 | P8 | P9 | P10 | P11 | Latency | Recip. throughput |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
VMOVUPD 0x320(%RSP),%YMM6 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0-1 | 0.33 |
VMULPD (%RDX,%RSI,8),%YMM6,%YMM6 | 1 | 0.50 | 0.50 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VCVTTPD2DQ %YMM6,%XMM7 | 2 | 0.50 | 0.50 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 1 |
VROUNDPD $0xb,%YMM6,%YMM8 | 2 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 1 |
VSUBPD %YMM8,%YMM6,%YMM6 | 1 | 0 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VPMOVSXDQ %XMM7,%YMM7 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VPSLLQ $0x3,%YMM7,%YMM7 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2-4 | 0.50 |
VMOVQ %RAX,%XMM8 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
VPBROADCASTQ %XMM8,%YMM8 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VPADDQ %YMM7,%YMM8,%YMM7 | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.33 |
VMOVQ %XMM7,%RDI | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
VEXTRACTI128 $0x1,%YMM7,%XMM8 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VMOVQ %XMM8,%R8 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
VPEXTRQ $0x1,%XMM7,%RBX | 2 | 1 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 1 |
VPEXTRQ $0x1,%XMM8,%R14 | 2 | 1 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 1 |
VMOVSD (%RDI),%XMM8 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
VMOVSD (%R8),%XMM9 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
VPADDQ %YMM7,%YMM14,%YMM10 | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.33 |
VMOVQ %XMM10,%RDI | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
VMOVHPD (%RBX),%XMM8,%XMM8 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4-12 | 1 |
VPEXTRQ $0x1,%XMM10,%R8 | 2 | 1 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 1 |
VMOVHPD (%R14),%XMM9,%XMM9 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4-12 | 1 |
VEXTRACTI128 $0x1,%YMM10,%XMM10 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VPEXTRQ $0x1,%XMM10,%RBX | 2 | 1 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 1 |
VINSERTF128 $0x1,%XMM9,%YMM8,%YMM8 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VMOVQ %XMM10,%R14 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
VMOVSD (%RDI),%XMM9 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
VPADDQ %YMM7,%YMM13,%YMM10 | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.33 |
VMOVSD (%R14),%XMM11 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
VPEXTRQ $0x1,%XMM10,%RDI | 2 | 1 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 1 |
VMOVHPD (%R8),%XMM9,%XMM9 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4-12 | 1 |
VMOVQ %XMM10,%R8 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
VEXTRACTI128 $0x1,%YMM10,%XMM10 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VMOVHPD (%RBX),%XMM11,%XMM11 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4-12 | 1 |
VMOVQ %XMM10,%RBX | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
VPEXTRQ $0x1,%XMM10,%R14 | 2 | 1 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 1 |
VINSERTF128 $0x1,%XMM11,%YMM9,%YMM10 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VMOVSD (%RBX),%XMM9 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
VMOVSD (%R8),%XMM11 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
VMOVHPD (%R14),%XMM9,%XMM9 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4-12 | 1 |
VMOVHPD (%RDI),%XMM11,%XMM11 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4-12 | 1 |
VINSERTF128 $0x1,%XMM9,%YMM11,%YMM9 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VPADDQ %YMM7,%YMM12,%YMM7 | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.33 |
VMOVQ %XMM7,%R8 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
VPEXTRQ $0x1,%XMM7,%RBX | 2 | 1 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 1 |
VEXTRACTI128 $0x1,%YMM7,%XMM7 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VMOVQ %XMM7,%R14 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
VPEXTRQ $0x1,%XMM7,%RDI | 2 | 1 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 1 |
VMOVAPD %YMM6,%YMM7 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VMOVUPD 0x2e0(%RSP),%YMM11 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0-1 | 0.33 |
VFMADD132PD 0x300(%RSP),%YMM11,%YMM7 | 1 | 0.50 | 0.50 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VFMADD213PD 0x2c0(%RSP),%YMM6,%YMM7 | 1 | 0.50 | 0.50 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VFMADD213PD 0x2a0(%RSP),%YMM6,%YMM7 | 1 | 0.50 | 0.50 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VFMADD213PD %YMM5,%YMM8,%YMM7 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VMOVAPD %YMM6,%YMM5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VMOVSD (%R14),%XMM8 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
VMOVUPD 0x260(%RSP),%YMM11 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0-1 | 0.33 |
VFMADD132PD 0x280(%RSP),%YMM11,%YMM5 | 1 | 0.50 | 0.50 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VFMADD213PD 0x240(%RSP),%YMM6,%YMM5 | 1 | 0.50 | 0.50 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VFMADD213PD 0x220(%RSP),%YMM6,%YMM5 | 1 | 0.50 | 0.50 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VFMADD213PD %YMM7,%YMM10,%YMM5 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VMOVSD (%R8),%XMM7 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
VMOVAPD %YMM6,%YMM10 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VMOVUPD 0x1e0(%RSP),%YMM11 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0-1 | 0.33 |
VFMADD132PD 0x200(%RSP),%YMM11,%YMM10 | 1 | 0.50 | 0.50 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VMOVHPD (%RBX),%XMM7,%XMM7 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4-12 | 1 |
VFMADD213PD %YMM2,%YMM6,%YMM10 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VFMADD213PD %YMM15,%YMM6,%YMM10 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VMOVHPD (%RDI),%XMM8,%XMM8 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4-12 | 1 |
VFMADD213PD %YMM5,%YMM9,%YMM10 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VINSERTF128 $0x1,%XMM8,%YMM7,%YMM7 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VMOVAPD %YMM6,%YMM5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VFMADD213PD %YMM0,%YMM3,%YMM5 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VFMADD213PD %YMM1,%YMM6,%YMM5 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VFMADD213PD %YMM4,%YMM6,%YMM5 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VFMADD213PD %YMM10,%YMM7,%YMM5 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
ADD $0x4,%RSI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.17 |
CMP %R11,%RSI | 1 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0 | 1 | 0.20 |
JB 4203b0 <_ZN16miniqmcreference17TwoBodyJastrowRefIN11qmcplusplus14BsplineFunctorIdEEE14evaluateRatiosERNS1_18VirtualParticleSetERSt6vectorIdSaIdEE+0x420> | 1 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 |