Loop Id: 314 | Module: exec | Source: BsplineFunctor.h:303-338 | Coverage: 0.01% |
---|
0x41cf40 VMOVSD (%R8,%RDI,8),%XMM2 [2] |
0x41cf46 VMOVSD %XMM2,0x40(%RSP) [8] |
0x41cf4c VMULSD %XMM3,%XMM2,%XMM2 |
0x41cf50 VCVTTSD2SI %XMM2,%R8D |
0x41cf54 VROUNDSD $0xb,%XMM2,%XMM2,%XMM3 |
0x41cf5a MOVSXD %R8D,%R8 |
0x41cf5d VMOVSD (%RSI,%R8,8),%XMM8 [6] |
0x41cf63 VMOVSD 0x8(%RSI,%R8,8),%XMM6 [6] |
0x41cf6a VMOVSD 0x10(%RSI,%R8,8),%XMM5 [6] |
0x41cf71 VSUBSD %XMM3,%XMM2,%XMM2 |
0x41cf75 VMOVSD 0x18(%RSI,%R8,8),%XMM3 [6] |
0x41cf7c VMULSD %XMM2,%XMM2,%XMM7 |
0x41cf80 VMULSD %XMM2,%XMM7,%XMM4 |
0x41cf84 VMOVAPD %XMM2,%XMM9 |
0x41cf88 VFMADD213SD %XMM1,%XMM11,%XMM9 |
0x41cf8d VMOVAPD %YMM10,%YMM13 |
0x41cf92 VMOVAPD %XMM2,%XMM10 |
0x41cf96 VMULSD %XMM8,%XMM9,%XMM9 |
0x41cf9b VMOVUPD 0x190(%RSP),%XMM1 [8] |
0x41cfa4 VFMADD132SD 0xf0(%RSP),%XMM1,%XMM10 [8] |
0x41cfae VMOVAPD %XMM2,%XMM11 |
0x41cfb2 VMOVUPD 0x170(%RSP),%XMM1 [8] |
0x41cfbb VFMADD132SD 0x180(%RSP),%XMM1,%XMM11 [8] |
0x41cfc5 VUNPCKLPD %XMM2,%XMM7,%XMM7 |
0x41cfc9 VFMADD213SD %XMM9,%XMM6,%XMM10 |
0x41cfce VMOVUPD 0x150(%RSP),%XMM1 [8] |
0x41cfd7 VFMADD132SD 0x160(%RSP),%XMM1,%XMM2 [8] |
0x41cfe1 VMULPD 0x6a0(%RSP),%XMM7,%XMM9 [8] |
0x41cfea VMOVAPD %XMM14,%XMM1 |
0x41cfee VMOVAPD %XMM0,%XMM14 |
0x41cff2 VMOVAPD %YMM15,%YMM0 |
0x41cff6 VMOVAPD %YMM12,%YMM15 |
0x41cffb VSHUFPD $0x1,%XMM9,%XMM9,%XMM12 |
0x41d001 VFMADD213SD %XMM10,%XMM5,%XMM11 |
0x41d006 VADDSD %XMM9,%XMM12,%XMM9 |
0x41d00b VADDSD 0x140(%RSP),%XMM9,%XMM9 [8] |
0x41d014 VMULPD 0x680(%RSP),%XMM7,%XMM10 [8] |
0x41d01d VMULSD %XMM8,%XMM9,%XMM9 |
0x41d022 VSHUFPD $0x1,%XMM10,%XMM10,%XMM12 |
0x41d028 VADDSD %XMM10,%XMM12,%XMM10 |
0x41d02d VADDSD 0x130(%RSP),%XMM10,%XMM10 [8] |
0x41d036 VFMADD213SD %XMM11,%XMM3,%XMM2 |
0x41d03b VMULPD 0x200(%RSP),%XMM7,%XMM11 [8] |
0x41d044 VSHUFPD $0x1,%XMM11,%XMM11,%XMM12 |
0x41d04a VADDSD %XMM11,%XMM12,%XMM11 |
0x41d04f VFMADD213SD %XMM9,%XMM6,%XMM10 |
0x41d054 VMULPD 0x1c0(%RSP),%XMM7,%XMM9 [8] |
0x41d05d VMOVAPD %XMM4,%XMM12 |
0x41d061 VFMADD132SD 0xb0(%RSP),%XMM9,%XMM12 [8] |
0x41d06b VSHUFPD $0x1,%XMM9,%XMM9,%XMM9 |
0x41d071 VADDSD 0xa0(%RSP),%XMM9,%XMM9 [8] |
0x41d07a VADDSD %XMM9,%XMM12,%XMM9 |
0x41d07f VMOVAPD %YMM15,%YMM12 |
0x41d084 VMOVAPD %YMM0,%YMM15 |
0x41d088 VMOVAPD %XMM14,%XMM0 |
0x41d08c VMOVAPD %XMM1,%XMM14 |
0x41d090 VADDSD 0xd0(%RSP),%XMM11,%XMM11 [8] |
0x41d099 VMULSD %XMM8,%XMM9,%XMM8 |
0x41d09e VFMADD213SD %XMM10,%XMM5,%XMM11 |
0x41d0a3 VMULPD %XMM7,%XMM12,%XMM9 |
0x41d0a7 VMOVAPD %XMM4,%XMM10 |
0x41d0ab VFMADD213SD %XMM9,%XMM1,%XMM10 |
0x41d0b0 VSHUFPD $0x1,%XMM9,%XMM9,%XMM9 |
0x41d0b6 VADDSD %XMM0,%XMM9,%XMM9 |
0x41d0ba VADDSD %XMM9,%XMM10,%XMM9 |
0x41d0bf VMOVAPD %YMM13,%YMM10 |
0x41d0c4 VMOVUPD 0x80(%RSP),%XMM13 [8] |
0x41d0cd VFMADD213SD %XMM8,%XMM6,%XMM9 |
0x41d0d2 VMULPD %XMM7,%XMM10,%XMM6 |
0x41d0d6 VMOVAPD %XMM4,%XMM8 |
0x41d0da VFMADD132SD 0x90(%RSP),%XMM6,%XMM8 [8] |
0x41d0e4 VSHUFPD $0x1,%XMM6,%XMM6,%XMM6 |
0x41d0e9 VADDSD %XMM6,%XMM13,%XMM6 |
0x41d0ed VADDSD %XMM6,%XMM8,%XMM6 |
0x41d0f1 VFMADD213SD %XMM9,%XMM5,%XMM6 |
0x41d0f6 VMOVUPD 0x70(%RSP),%XMM9 [8] |
0x41d0fc VMULPD 0x1e0(%RSP),%XMM7,%XMM5 [8] |
0x41d105 VSHUFPD $0x1,%XMM5,%XMM5,%XMM8 |
0x41d10a VADDSD %XMM5,%XMM8,%XMM5 |
0x41d10e VMOVUPD 0x120(%RSP),%XMM8 [8] |
0x41d117 VADDSD 0xc0(%RSP),%XMM5,%XMM5 [8] |
0x41d120 VFMADD213SD %XMM11,%XMM3,%XMM5 |
0x41d125 VMULPD %XMM7,%XMM15,%XMM7 |
0x41d129 VFMADD213SD %XMM7,%XMM9,%XMM4 |
0x41d12e VSHUFPD $0x1,%XMM7,%XMM7,%XMM7 |
0x41d133 VADDSD %XMM7,%XMM8,%XMM7 |
0x41d137 VADDSD %XMM7,%XMM4,%XMM4 |
0x41d13b VFMADD213SD %XMM6,%XMM3,%XMM4 |
0x41d140 VMOVUPD 0x110(%RSP),%XMM6 [8] |
0x41d149 VMOVUPD 0xe0(%RSP),%XMM3 [8] |
0x41d152 MOVSXD (%RAX,%RDI,4),%R8 [1] |
0x41d156 ADD %R11,%R8 |
0x41d159 VMULSD %XMM2,%XMM6,%XMM2 |
0x41d15d VMOVSD %XMM2,(%RCX,%R8,8) [4] |
0x41d163 VMULSD %XMM3,%XMM5,%XMM2 |
0x41d167 VDIVSD 0x40(%RSP),%XMM2,%XMM1 [8] |
0x41d16d VMOVSD %XMM1,(%RDX,%R8,8) [5] |
0x41d173 VMOVUPD (%RSP),%XMM1 [8] |
0x41d178 VMOVUPD 0x100(%RSP),%XMM11 [8] |
0x41d181 VMOVSD %XMM4,(%R9,%R8,8) [3] |
0x41d187 MOV 0x20(%RBP),%R8 [7] |
0x41d18b INC %RDI |
0x41d18e CMP %RDI,%R10 |
0x41d191 JNE 41cf40 |
/scratch_na/users/xoserete/qaas_runs/171-417-8059/intel/miniqmc/build/miniqmc/src/QMCWaveFunctions/Jastrow/BsplineFunctor.h: 303 - 338 |
-------------------------------------------------------------------------------- |
303: for (int j = 0; j < iCount; j++) |
304: { |
305: real_type r = distArrayCompressed[j]; |
306: int iScatter = distIndices[j]; |
307: real_type rinv = cOne / r; |
308: r *= DeltaRInv; |
309: int iGather = (int)r; |
310: real_type t = r - real_type(iGather); |
311: real_type tp0 = t * t * t; |
312: real_type tp1 = t * t; |
313: real_type tp2 = t; |
314: |
315: real_type sCoef0 = SplineCoefs[iGather + 0]; |
316: real_type sCoef1 = SplineCoefs[iGather + 1]; |
317: real_type sCoef2 = SplineCoefs[iGather + 2]; |
318: real_type sCoef3 = SplineCoefs[iGather + 3]; |
319: |
320: // clang-format off |
321: laplArray[iScatter] = dSquareDeltaRinv * |
322: (sCoef0*( d2A[ 2]*tp2 + d2A[ 3])+ |
323: sCoef1*( d2A[ 6]*tp2 + d2A[ 7])+ |
324: sCoef2*( d2A[10]*tp2 + d2A[11])+ |
325: sCoef3*( d2A[14]*tp2 + d2A[15])); |
326: |
327: gradArray[iScatter] = DeltaRInv * rinv * |
328: (sCoef0*( dA[ 1]*tp1 + dA[ 2]*tp2 + dA[ 3])+ |
329: sCoef1*( dA[ 5]*tp1 + dA[ 6]*tp2 + dA[ 7])+ |
330: sCoef2*( dA[ 9]*tp1 + dA[10]*tp2 + dA[11])+ |
331: sCoef3*( dA[13]*tp1 + dA[14]*tp2 + dA[15])); |
332: |
333: valArray[iScatter] = (sCoef0*(A[ 0]*tp0 + A[ 1]*tp1 + A[ 2]*tp2 + A[ 3])+ |
334: sCoef1*(A[ 4]*tp0 + A[ 5]*tp1 + A[ 6]*tp2 + A[ 7])+ |
335: sCoef2*(A[ 8]*tp0 + A[ 9]*tp1 + A[10]*tp2 + A[11])+ |
336: sCoef3*(A[12]*tp0 + A[13]*tp1 + A[14]*tp2 + A[15])); |
337: // clang-format on |
338: } |
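According to the CQA metrics below, this is the scalar peel/tail of a source loop unrolled by 4, and CQA estimates a 2.40x speedup if the FP arithmetic were vectorized (6.90x if fully vectorized). The snippet below is a minimal, self-contained sketch of what such a vectorization attempt could look like; it is not project code, and all of its names (coefs, dist, idx, val) are hypothetical stand-ins for the arrays used above.

```cpp
// Standalone illustration (not miniQMC code): every name below is made up for this
// example. It mimics the shape of the loop above (a data-dependent gather from a
// coefficient table, a short cubic-polynomial evaluation, and a scatter store) and
// adds the `#pragma omp simd` hint that asks the compiler to vectorize anyway,
// assuming the scatter indices are distinct within the block.
#include <cstdio>
#include <vector>

int main()
{
  const double DeltaRInv = 10.0;
  std::vector<double> coefs(64, 1.0);                        // stand-in for SplineCoefs
  std::vector<double> dist = {0.11, 0.23, 0.37, 0.49, 0.52}; // compressed distances
  std::vector<int>    idx  = {4, 1, 3, 0, 2};                // scatter indices
  std::vector<double> val(dist.size(), 0.0);

#pragma omp simd
  for (int j = 0; j < (int)dist.size(); j++)
  {
    double r  = dist[j] * DeltaRInv;
    int    ig = (int)r;                 // gather index, data dependent
    double t  = r - (double)ig;
    // four gathered coefficients combined with the cubic basis in t
    double acc = coefs[ig + 0] * (t * t * t)
               + coefs[ig + 1] * (t * t)
               + coefs[ig + 2] * t
               + coefs[ig + 3];
    val[idx[j]] = acc;                  // scatter store
  }

  for (double v : val)
    std::printf("%g\n", v);
  return 0;
}
```

Whether a compiler actually vectorizes such a loop then depends on the target ISA's gather/scatter support and on the promise that the scatter indices hold no duplicates, a promise that `#pragma omp simd` shifts onto the programmer.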
Coverage (%) | Name | Source Location | Module |
---|---|---|---|
►43.31+ | miniqmcreference::TwoBodyJastr[...] | TwoBodyJastrowRef.h:274 | exec |
○ | qmcplusplus::WaveFunction::rat[...] | WaveFunction.cpp:201 | exec |
○ | main.extracted.110 | refwrap.h:313 | exec |
○ | __kmp_invoke_microtask | libiomp5.so | |
○ | __kmp_invoke_task_func | libiomp5.so | |
►33.12+ | miniqmcreference::TwoBodyJastr[...] | TwoBodyJastrowRef.h:274 | exec |
○ | qmcplusplus::WaveFunction::acc[...] | NewTimer.h:249 | exec |
○ | main.extracted.110 | refwrap.h:313 | exec |
○ | __kmp_invoke_microtask | libiomp5.so | |
○ | __kmp_invoke_task_func | libiomp5.so | |
►19.11+ | miniqmcreference::OneBodyJastr[...] | OneBodyJastrowRef.h:218 | exec |
○ | miniqmcreference::OneBodyJastr[...] | stl_vector.h:1056 | exec |
○ | qmcplusplus::WaveFunction::rat[...] | WaveFunction.cpp:201 | exec |
○ | main.extracted.110 | refwrap.h:313 | exec |
○ | __kmp_invoke_microtask | libiomp5.so | |
○ | __kmp_invoke_task_func | libiomp5.so | |
►3.18+ | miniqmcreference::TwoBodyJastr[...] | TwoBodyJastrowRef.h:274 | exec |
○ | miniqmcreference::TwoBodyJastr[...] | TwoBodyJastrowRef.h:411 | exec |
○ | qmcplusplus::WaveFunction::eva[...] | WaveFunction.cpp:175 | exec |
○ | main.extracted.113 | miniqmc.cpp:397 | exec |
○ | __kmp_invoke_microtask | libiomp5.so | |
○ | __kmp_invoke_task_func | libiomp5.so | |
►1.27+ | miniqmcreference::OneBodyJastr[...] | OneBodyJastrowRef.h:218 | exec |
○ | miniqmcreference::OneBodyJastr[...] | OneBodyJastrowRef.h:112 | exec |
○ | miniqmcreference::OneBodyJastr[...] | OneBodyJastrowRef.h:122 | exec |
○ | qmcplusplus::WaveFunction::eva[...] | WaveFunction.cpp:175 | exec |
○ | main.extracted.113 | miniqmc.cpp:397 | exec |
○ | __kmp_invoke_microtask | libiomp5.so | |
○ | __kmp_invoke_task_func | libiomp5.so |
Path / |
Metric | Value |
---|---|
CQA speedup if no scalar integer | 1.00 |
CQA speedup if FP arith vectorized | 2.40 |
CQA speedup if fully vectorized | 6.90 |
CQA speedup if no inter-iteration dependency | NA |
CQA speedup if next bottleneck killed | 1.14 |
Bottlenecks | P1 |
Function | qmcplusplus::BsplineFunctor |
Source | BsplineFunctor.h:303-338 |
Source loop unroll info | unrolled by 4 |
Source loop unroll confidence level | high |
Unroll/vectorization loop type | peel/tail |
Unroll factor | 1 |
CQA cycles | 25.00 |
CQA cycles if no scalar integer | 25.00 |
CQA cycles if FP arith vectorized | 10.42 |
CQA cycles if fully vectorized | 3.63 |
Front-end cycles | 17.50 |
DIV/SQRT cycles | 22.00 |
P0 cycles | 25.00 |
P1 cycles | 11.00 |
P2 cycles | 11.00 |
P3 cycles | 2.00 |
P4 cycles | 20.50 |
P5 cycles | 1.60 |
P6 cycles | 2.00 |
P7 cycles | 2.00 |
P8 cycles | 2.00 |
P9 cycles | 1.40 |
P10 cycles | 11.00 |
P11 cycles | 4.00 |
Inter-iter dependencies cycles | 1 - 2 |
FE+BE cycles (UFS) | 26.22 - 26.00 |
Stall cycles (UFS) | 8.03 - 7.80 |
Nb insns | 104.00 |
Nb uops | 105.00 |
Nb loads | 33.00 |
Nb stores | 4.00 |
Nb stack references | 27.00 |
FLOP/cycle | 3.04 |
Nb FLOP add-sub | 17.00 |
Nb FLOP mul | 24.00 |
Nb FLOP fma | 17.00 |
Nb FLOP div | 1.00 |
Nb FLOP rcp | 0.00 |
Nb FLOP sqrt | 0.00 |
Nb FLOP rsqrt | 0.00 |
Bytes/cycle | 16.48 |
Bytes prefetched | 0.00 |
Bytes loaded | 380.00 |
Bytes stored | 32.00 |
Stride 0 | 2.00 |
Stride 1 | 1.00 |
Stride n | 0.00 |
Stride unknown | 5.00 |
Stride indirect | 0.00 |
Vectorization ratio all | 43.30 |
Vectorization ratio load | 48.39 |
Vectorization ratio store | 0.00 |
Vectorization ratio mul | 50.00 |
Vectorization ratio add_sub | 0.00 |
Vectorization ratio fma | 0.00 |
Vectorization ratio div_sqrt | 0.00 |
Vectorization ratio other | 88.89 |
Vector-efficiency ratio all | 19.46 |
Vector-efficiency ratio load | 18.55 |
Vector-efficiency ratio store | 12.50 |
Vector-efficiency ratio mul | 18.75 |
Vector-efficiency ratio add_sub | 12.50 |
Vector-efficiency ratio fma | 12.50 |
Vector-efficiency ratio div_sqrt | 12.50 |
Vector-efficiency ratio other | 29.17 |
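A quick consistency check on the derived rates in this table, assuming the usual convention that one FMA counts as two FLOPs: FLOP/cycle = (17 add-sub + 24 mul + 2 × 17 fma + 1 div) / 25 CQA cycles = 76 / 25 = 3.04, and Bytes/cycle = (380 bytes loaded + 32 bytes stored) / 25 = 412 / 25 = 16.48, matching the reported values.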
Path / |
Function | qmcplusplus::BsplineFunctor |
Source file and lines | BsplineFunctor.h:303-338 |
Module | exec |
nb instructions | 104 |
nb uops | 105 |
loop length | 599 |
used x86 registers | 11 |
used mmx registers | 0 |
used xmm registers | 16 |
used ymm registers | 5 |
used zmm registers | 0 |
nb stack references | 27 |
ADD-SUB / MUL ratio | 1.06 |
micro-operation queue | 17.50 cycles |
front end | 17.50 cycles |
Port | P0 | P1 | P2 | P3 | P4 | P5 | P6 | P7 | P8 | P9 | P10 | P11 |
---|---|---|---|---|---|---|---|---|---|---|---|---|
uops | 22.00 | 21.50 | 11.00 | 11.00 | 2.00 | 20.50 | 1.60 | 2.00 | 2.00 | 2.00 | 1.40 | 11.00 |
cycles | 22.00 | 25.00 | 11.00 | 11.00 | 2.00 | 20.50 | 1.60 | 2.00 | 2.00 | 2.00 | 1.40 | 11.00 |
Cycles executing div or sqrt instructions | 4.00 |
Longest recurrence chain latency (RecMII) | 1.00-2.00 |
FE+BE cycles | 26.22-26.00 |
Stall cycles | 8.03-7.80 |
RS full (events) | 18.90-18.10 |
PRF_FLOAT full (events) | 0.09-0.43 |
Front-end | 17.50 |
Dispatch | 25.00 |
DIV/SQRT | 4.00 |
Data deps. | 1.00-2.00 |
Overall L1 | 25.00 |
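For reference, the Overall L1 figure is consistent with taking the maximum of the listed components, max(17.50, 25.00, 4.00, 2.00) = 25.00 cycles; dispatch pressure (P0/P1 at 22.00/25.00 cycles in the port table above), rather than the front end or the divider, bounds this loop, which matches the reported P1 bottleneck.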
Vectorization ratios
all | 43% |
load | 48% |
store | 0% |
mul | 50% |
add-sub | 0% |
fma | 0% |
div/sqrt | 0% |
other | 88% |
Vector-efficiency ratios
all | 19% |
load | 18% |
store | 12% |
mul | 18% |
add-sub | 12% |
fma | 12% |
div/sqrt | 12% |
other | 29% |
Instruction | Nb FU | P0 | P1 | P2 | P3 | P4 | P5 | P6 | P7 | P8 | P9 | P10 | P11 | Latency | Recip. throughput |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
VMOVSD (%R8,%RDI,8),%XMM2 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
VMOVSD %XMM2,0x40(%RSP) | 1 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0.50 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 |
VMULSD %XMM3,%XMM2,%XMM2 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VCVTTSD2SI %XMM2,%R8D | 2 | 1.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 1 |
VROUNDSD $0xb,%XMM2,%XMM2,%XMM3 | 2 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 1 |
MOVSXD %R8D,%R8 | 1 | 0 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0.33 | 0 | 1 | 0.33 |
VMOVSD (%RSI,%R8,8),%XMM8 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
VMOVSD 0x8(%RSI,%R8,8),%XMM6 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
VMOVSD 0x10(%RSI,%R8,8),%XMM5 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
VSUBSD %XMM3,%XMM2,%XMM2 | 1 | 0 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VMOVSD 0x18(%RSI,%R8,8),%XMM3 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
VMULSD %XMM2,%XMM2,%XMM7 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VMULSD %XMM2,%XMM7,%XMM4 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VMOVAPD %XMM2,%XMM9 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VFMADD213SD %XMM1,%XMM11,%XMM9 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VMOVAPD %YMM10,%YMM13 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VMOVAPD %XMM2,%XMM10 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VMULSD %XMM8,%XMM9,%XMM9 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VMOVUPD 0x190(%RSP),%XMM1 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0-1 | 0.33 |
VFMADD132SD 0xf0(%RSP),%XMM1,%XMM10 | 1 | 0.50 | 0.50 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VMOVAPD %XMM2,%XMM11 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VMOVUPD 0x170(%RSP),%XMM1 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0-1 | 0.33 |
VFMADD132SD 0x180(%RSP),%XMM1,%XMM11 | 1 | 0.50 | 0.50 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VUNPCKLPD %XMM2,%XMM7,%XMM7 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
VFMADD213SD %XMM9,%XMM6,%XMM10 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VMOVUPD 0x150(%RSP),%XMM1 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0-1 | 0.33 |
VFMADD132SD 0x160(%RSP),%XMM1,%XMM2 | 1 | 0.50 | 0.50 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VMULPD 0x6a0(%RSP),%XMM7,%XMM9 | 1 | 0.50 | 0.50 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VMOVAPD %XMM14,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VMOVAPD %XMM0,%XMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VMOVAPD %YMM15,%YMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VMOVAPD %YMM12,%YMM15 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VSHUFPD $0x1,%XMM9,%XMM9,%XMM12 | 1 | 0 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VFMADD213SD %XMM10,%XMM5,%XMM11 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VADDSD %XMM9,%XMM12,%XMM9 | 1 | 0 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VADDSD 0x140(%RSP),%XMM9,%XMM9 | 1 | 0 | 0.50 | 0.33 | 0.33 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.33 | 3 | 0.50 |
VMULPD 0x680(%RSP),%XMM7,%XMM10 | 1 | 0.50 | 0.50 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VMULSD %XMM8,%XMM9,%XMM9 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VSHUFPD $0x1,%XMM10,%XMM10,%XMM12 | 1 | 0 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VADDSD %XMM10,%XMM12,%XMM10 | 1 | 0 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VADDSD 0x130(%RSP),%XMM10,%XMM10 | 1 | 0 | 0.50 | 0.33 | 0.33 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.33 | 3 | 0.50 |
VFMADD213SD %XMM11,%XMM3,%XMM2 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VMULPD 0x200(%RSP),%XMM7,%XMM11 | 1 | 0.50 | 0.50 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VSHUFPD $0x1,%XMM11,%XMM11,%XMM12 | 1 | 0 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VADDSD %XMM11,%XMM12,%XMM11 | 1 | 0 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VFMADD213SD %XMM9,%XMM6,%XMM10 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VMULPD 0x1c0(%RSP),%XMM7,%XMM9 | 1 | 0.50 | 0.50 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VMOVAPD %XMM4,%XMM12 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VFMADD132SD 0xb0(%RSP),%XMM9,%XMM12 | 1 | 0.50 | 0.50 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VSHUFPD $0x1,%XMM9,%XMM9,%XMM9 | 1 | 0 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VADDSD 0xa0(%RSP),%XMM9,%XMM9 | 1 | 0 | 0.50 | 0.33 | 0.33 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.33 | 3 | 0.50 |
VADDSD %XMM9,%XMM12,%XMM9 | 1 | 0 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VMOVAPD %YMM15,%YMM12 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VMOVAPD %YMM0,%YMM15 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VMOVAPD %XMM14,%XMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VMOVAPD %XMM1,%XMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VADDSD 0xd0(%RSP),%XMM11,%XMM11 | 1 | 0 | 0.50 | 0.33 | 0.33 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.33 | 3 | 0.50 |
VMULSD %XMM8,%XMM9,%XMM8 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VFMADD213SD %XMM10,%XMM5,%XMM11 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VMULPD %XMM7,%XMM12,%XMM9 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VMOVAPD %XMM4,%XMM10 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VFMADD213SD %XMM9,%XMM1,%XMM10 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VSHUFPD $0x1,%XMM9,%XMM9,%XMM9 | 1 | 0 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VADDSD %XMM0,%XMM9,%XMM9 | 1 | 0 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VADDSD %XMM9,%XMM10,%XMM9 | 1 | 0 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VMOVAPD %YMM13,%YMM10 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VMOVUPD 0x80(%RSP),%XMM13 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0-1 | 0.33 |
VFMADD213SD %XMM8,%XMM6,%XMM9 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VMULPD %XMM7,%XMM10,%XMM6 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VMOVAPD %XMM4,%XMM8 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VFMADD132SD 0x90(%RSP),%XMM6,%XMM8 | 1 | 0.50 | 0.50 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VSHUFPD $0x1,%XMM6,%XMM6,%XMM6 | 1 | 0 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VADDSD %XMM6,%XMM13,%XMM6 | 1 | 0 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VADDSD %XMM6,%XMM8,%XMM6 | 1 | 0 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VFMADD213SD %XMM9,%XMM5,%XMM6 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VMOVUPD 0x70(%RSP),%XMM9 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0-1 | 0.33 |
VMULPD 0x1e0(%RSP),%XMM7,%XMM5 | 1 | 0.50 | 0.50 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VSHUFPD $0x1,%XMM5,%XMM5,%XMM8 | 1 | 0 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VADDSD %XMM5,%XMM8,%XMM5 | 1 | 0 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VMOVUPD 0x120(%RSP),%XMM8 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0-1 | 0.33 |
VADDSD 0xc0(%RSP),%XMM5,%XMM5 | 1 | 0 | 0.50 | 0.33 | 0.33 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.33 | 3 | 0.50 |
VFMADD213SD %XMM11,%XMM3,%XMM5 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VMULPD %XMM7,%XMM15,%XMM7 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VFMADD213SD %XMM7,%XMM9,%XMM4 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VSHUFPD $0x1,%XMM7,%XMM7,%XMM7 | 1 | 0 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VADDSD %XMM7,%XMM8,%XMM7 | 1 | 0 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VADDSD %XMM7,%XMM4,%XMM4 | 1 | 0 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VFMADD213SD %XMM6,%XMM3,%XMM4 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VMOVUPD 0x110(%RSP),%XMM6 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0-1 | 0.33 |
VMOVUPD 0xe0(%RSP),%XMM3 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0-1 | 0.33 |
MOVSXD (%RAX,%RDI,4),%R8 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
ADD %R11,%R8 | 1 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0 | 1 | 0.20 |
VMULSD %XMM2,%XMM6,%XMM2 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VMOVSD %XMM2,(%RCX,%R8,8) | 1 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0.50 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 |
VMULSD %XMM3,%XMM5,%XMM2 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VDIVSD 0x40(%RSP),%XMM2,%XMM1 | 1 | 1 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 13-15 | 4 |
VMOVSD %XMM1,(%RDX,%R8,8) | 1 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0.50 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 |
VMOVUPD (%RSP),%XMM1 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0-1 | 0.33 |
VMOVUPD 0x100(%RSP),%XMM11 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0-1 | 0.33 |
VMOVSD %XMM4,(%R9,%R8,8) | 1 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0.50 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 |
MOV 0x20(%RBP),%R8 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
INC %RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.17 |
CMP %RDI,%R10 | 1 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0 | 1 | 0.20 |
JNE 41cf40 <_ZNK11qmcplusplus14BsplineFunctorIdE11evaluateVGLEiiiPKdPdS4_S4_S4_Pi+0x9b0> | 1 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 |