| Loop Id: 97 | Module: exec | Source: advec_cell_kernel.f90:202-246 [...] | Coverage: 4.48% |
|---|---|---|---|
0x41d828 MOV 0x120(%RSP),%R12 [10] |
0x41d830 VMOVSD (%R12,%RAX,8),%XMM11 [2] |
0x41d836 VCOMISD %XMM3,%XMM11 |
0x41d83a JBE 41f610 |
0x41d840 MOV 0x108(%RSP),%RSI [10] |
0x41d848 MOVSXD 0xd8(%RSP),%RDI [10] |
0x41d850 MOV 0x130(%RSP),%R11 [10] |
0x41d858 MOV %RSI,%RDX |
0x41d85b MOV 0x138(%RSP),%RCX [10] |
0x41d863 MOV 0x100(%RSP),%R8 [10] |
0x41d86b VANDPD %XMM5,%XMM11,%XMM2 |
0x41d86f MOV 0x128(%RSP),%R12 [10] |
0x41d877 VMOVSD (%R13),%XMM10 [5] |
0x41d87d IMUL %RDX,%RCX |
0x41d881 ADD %R12,%RSI |
0x41d884 MOV %R15,%R12 |
0x41d887 IMUL %RDI,%R12 |
0x41d88b ADD %R8,%RCX |
0x41d88e MOV 0xe8(%RSP),%R8 [10] |
0x41d896 ADD %RAX,%RCX |
0x41d899 VDIVSD (%R8,%RSI,8),%XMM10,%XMM12 [8] |
0x41d89f MOV %R15,%RSI |
0x41d8a2 MOV 0xf0(%RSP),%R8 [10] |
0x41d8aa VDIVSD (%R9,%RCX,8),%XMM2,%XMM9 [7] |
0x41d8b0 VADDSD %XMM4,%XMM9,%XMM13 |
0x41d8b4 IMUL %RDX,%RSI |
0x41d8b8 VMULSD %XMM13,%XMM12,%XMM10 |
0x41d8bd VSUBSD %XMM9,%XMM8,%XMM13 |
0x41d8c2 ADD %R8,%RSI |
0x41d8c5 ADD %RAX,%R8 |
0x41d8c8 ADD %RAX,%RSI |
0x41d8cb ADD %R8,%R12 |
0x41d8ce VMOVSD (%R10,%RSI,8),%XMM15 [1] |
0x41d8d4 VSUBSD (%R10,%R12,8),%XMM15,%XMM0 [4] |
0x41d8da MOV %R15,%R12 |
0x41d8dd IMUL %R11,%R12 |
0x41d8e1 ADD %R12,%R8 |
0x41d8e4 VMOVSD (%R10,%R8,8),%XMM14 [13] |
0x41d8ea VSUBSD %XMM15,%XMM14,%XMM1 |
0x41d8ef VMULSD %XMM0,%XMM1,%XMM14 |
0x41d8f3 VCMPSD $0x6,%XMM3,%XMM1,%XMM2 |
0x41d8f8 VBLENDVPD %XMM2,%XMM4,%XMM6,%XMM12 |
0x41d8fe VCOMISD %XMM3,%XMM14 |
0x41d902 JBE 41d932 |
0x41d904 VANDPD %XMM5,%XMM1,%XMM1 |
0x41d908 VANDPD %XMM5,%XMM0,%XMM0 |
0x41d90c VSUBSD %XMM9,%XMM4,%XMM9 |
0x41d911 VMULSD %XMM13,%XMM1,%XMM2 |
0x41d916 VMINSD %XMM1,%XMM0,%XMM14 |
0x41d91a VFMADD132SD %XMM10,%XMM2,%XMM0 |
0x41d91f VMULSD %XMM7,%XMM0,%XMM1 |
0x41d923 VMINSD %XMM14,%XMM1,%XMM0 |
0x41d928 VMULSD %XMM9,%XMM0,%XMM14 |
0x41d92d VFMADD231SD %XMM12,%XMM14,%XMM15 |
0x41d932 VMULSD %XMM11,%XMM15,%XMM1 |
0x41d937 MOV 0x118(%RSP),%R8 [10] |
0x41d93f IMUL %R14,%RDX |
0x41d943 IMUL %R14,%RDI |
0x41d947 IMUL %R14,%R11 |
0x41d94b VMOVSD %XMM1,(%R8,%RAX,8) [9] |
0x41d951 VMOVSD (%R9,%RCX,8),%XMM12 [7] |
0x41d957 MOV 0xf8(%RSP),%RCX [10] |
0x41d95f VMOVSD (%R10,%RSI,8),%XMM15 [1] |
0x41d965 LEA (%RCX,%RAX,1),%RSI |
0x41d969 ADD %RSI,%RDX |
0x41d96c ADD %RSI,%RDI |
0x41d96f ADD %R11,%RSI |
0x41d972 VMOVSD (%RBX,%RDX,8),%XMM9 [11] |
0x41d977 VMOVSD (%RBX,%RSI,8),%XMM11 [3] |
0x41d97c VSUBSD %XMM9,%XMM11,%XMM0 |
0x41d981 VSUBSD (%RBX,%RDI,8),%XMM9,%XMM2 [12] |
0x41d986 VCMPSD $0x6,%XMM3,%XMM0,%XMM14 |
0x41d98b VBLENDVPD %XMM14,%XMM4,%XMM6,%XMM11 |
0x41d991 VMULSD %XMM2,%XMM0,%XMM14 |
0x41d995 VCOMISD %XMM3,%XMM14 |
0x41d999 JBE 41f5e0 |
0x41d99f VMULSD %XMM15,%XMM12,%XMM15 |
0x41d9a4 VANDPD %XMM5,%XMM0,%XMM0 |
0x41d9a8 VANDPD %XMM5,%XMM2,%XMM2 |
0x41d9ac MOV 0x110(%RSP),%R11 [10] |
0x41d9b4 VMULSD %XMM13,%XMM0,%XMM13 |
0x41d9b9 VMINSD %XMM0,%XMM2,%XMM14 |
0x41d9bd VFMADD132SD %XMM10,%XMM13,%XMM2 |
0x41d9c2 VANDPD %XMM5,%XMM1,%XMM10 |
0x41d9c6 VDIVSD %XMM15,%XMM10,%XMM12 |
0x41d9cb VMULSD %XMM7,%XMM2,%XMM2 |
0x41d9cf VMINSD %XMM14,%XMM2,%XMM14 |
0x41d9d4 VSUBSD %XMM12,%XMM4,%XMM0 |
0x41d9d9 VMULSD %XMM14,%XMM0,%XMM13 |
0x41d9de VFMADD132SD %XMM11,%XMM9,%XMM13 |
0x41d9e3 VMULSD %XMM1,%XMM13,%XMM1 |
0x41d9e7 VMOVSD %XMM1,(%R11,%RAX,8) [6] |
0x41d9ed INC %RAX |
0x41d9f0 CMP %EAX,0xe0(%RSP) [10] |
0x41d9f7 JGE 41d828 |
0x41f5e0 VMULSD %XMM1,%XMM9,%XMM10 |
0x41f5e4 MOV 0x110(%RSP),%RDI [10] |
0x41f5ec VMOVSD %XMM10,(%RDI,%RAX,8) [6] |
0x41f5f1 INC %RAX |
0x41f5f4 CMP %EAX,0xe0(%RSP) [10] |
0x41f5fb JGE 41d828 |
0x41f610 MOV 0xb8(%RSP),%RDI [10] |
0x41f618 MOV 0x108(%RSP),%R11 [10] |
0x41f620 MOV 0x130(%RSP),%RDX [10] |
0x41f628 MOV %RDI,%RSI |
0x41f62b JMP 41d85b |
/scratch_na/users/xoserete/qaas_runs/171-214-9740/intel/CloverLeafFC/build/CloverLeafFC/CloverLeaf_ref/kernels/advec_cell_kernel.f90: 202 - 246 |
-------------------------------------------------------------------------------- |
202: DO j=x_min,x_max |
203: |
204: IF(vol_flux_y(j,k).GT.0.0)THEN |
[...] |
216: sigmat=ABS(vol_flux_y(j,k))/pre_vol(j,donor) |
217: sigma3=(1.0_8+sigmat)*(vertexdy(k)/vertexdy(dif)) |
218: sigma4=2.0_8-sigmat |
219: |
220: sigma=sigmat |
221: sigmav=sigmat |
222: |
223: diffuw=density1(j,donor)-density1(j,upwind) |
224: diffdw=density1(j,downwind)-density1(j,donor) |
225: wind=1.0_8 |
226: IF(diffdw.LE.0.0) wind=-1.0_8 |
227: IF(diffuw*diffdw.GT.0.0)THEN |
228: limiter=(1.0_8-sigmav)*wind*MIN(ABS(diffuw),ABS(diffdw)& |
229: ,one_by_six*(sigma3*ABS(diffuw)+sigma4*ABS(diffdw))) |
230: ELSE |
231: limiter=0.0 |
232: ENDIF |
233: mass_flux_y(j,k)=vol_flux_y(j,k)*(density1(j,donor)+limiter) |
234: |
235: sigmam=ABS(mass_flux_y(j,k))/(density1(j,donor)*pre_vol(j,donor)) |
236: diffuw=energy1(j,donor)-energy1(j,upwind) |
237: diffdw=energy1(j,downwind)-energy1(j,donor) |
238: wind=1.0_8 |
239: IF(diffdw.LE.0.0) wind=-1.0_8 |
240: IF(diffuw*diffdw.GT.0.0)THEN |
241: limiter=(1.0_8-sigmam)*wind*MIN(ABS(diffuw),ABS(diffdw)& |
242: ,one_by_six*(sigma3*ABS(diffuw)+sigma4*ABS(diffdw))) |
243: ELSE |
244: limiter=0.0 |
245: ENDIF |
246: ener_flux(j,k)=mass_flux_y(j,k)*(energy1(j,donor)+limiter) |
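CQA projects a 2.82x gain if the FP arithmetic were vectorized, and the most visible obstacle in lines 225-232 (and again in 238-245) is the data-dependent branching on `wind` and `limiter`. As a hedged sketch (an assumed refactoring, not the CloverLeaf reference code), the same logic can be written branch-free with `SIGN` and `MERGE`, which compilers can usually if-convert and vectorize across the `j` loop:

```fortran
! Hedged sketch: branch-free equivalent of the wind/limiter logic at
! source lines 225-232. SIGN(1,diffdw) differs from the original only
! when diffdw == 0, where diffuw*diffdw > 0 is false and the limiter
! is zero regardless, so the result is unchanged.
PURE FUNCTION vl_limiter(diffuw, diffdw, sigmav, sigma3, sigma4, one_by_six) &
     RESULT(limiter)
  REAL(8), INTENT(IN) :: diffuw, diffdw, sigmav, sigma3, sigma4, one_by_six
  REAL(8) :: limiter, wind
  wind = SIGN(1.0_8, diffdw)                       ! +/-1 without a branch
  limiter = MERGE((1.0_8 - sigmav)*wind*MIN(ABS(diffuw), ABS(diffdw),      &
                  one_by_six*(sigma3*ABS(diffuw) + sigma4*ABS(diffdw))),   &
                  0.0_8, diffuw*diffdw > 0.0_8)    ! 0 where slopes disagree
END FUNCTION vl_limiter
```

The generated code already if-converts part of this (the `VCMPSD`/`VBLENDVPD` pairs appear to implement the `wind` selects), so the remaining blockers for packed execution are likely the early-exit branches at 0x41d83a, 0x41d902 and 0x41d999 and the scalar divides.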
| Path / |
| Metric | Value |
|---|---|
| CQA speedup if no scalar integer | 1.15 |
| CQA speedup if FP arith vectorized | 2.82 |
| CQA speedup if fully vectorized | 3.17 |
| CQA speedup if no inter-iteration dependency | NA |
| CQA speedup if next bottleneck killed | 1.02 |
| Bottlenecks | micro-operation queue |
| Function | __advec_cell_kernel_module_MOD_advec_cell_kernel._omp_fn.0 |
| Source | advec_cell_kernel.f90:202-204,advec_cell_kernel.f90:216-246 |
| Source loop unroll info | not unrolled or unrolled with no peel/tail loop |
| Source loop unroll confidence level | max |
| Unroll/vectorization loop type | NA |
| Unroll factor | NA |
| CQA cycles | 19.00 |
| CQA cycles if no scalar integer | 16.50 |
| CQA cycles if FP arith vectorized | 6.73 |
| CQA cycles if fully vectorized | 6.00 |
| Front-end cycles | 19.00 |
| DIV/SQRT cycles | 18.67 |
| P0 cycles | 18.67 |
| P1 cycles | 10.00 |
| P2 cycles | 10.00 |
| P3 cycles | 1.50 |
| P4 cycles | 18.67 |
| P5 cycles | 9.00 |
| P6 cycles | 1.50 |
| P7 cycles | 1.50 |
| P8 cycles | 1.50 |
| P9 cycles | 9.00 |
| P10 cycles | 10.00 |
| P11 cycles | 12.00 |
| Inter-iter dependencies cycles | NA |
| FE+BE cycles (UFS) | 22.02 - 22.51 |
| Stall cycles (UFS) | 2.17 - 2.66 |
| Nb insns | 106.00 |
| Nb uops | 110.00 |
| Nb loads | 30.00 |
| Nb stores | 3.00 |
| Nb stack references | 14.00 |
| FLOP/cycle | 1.68 |
| Nb FLOP add-sub | 8.00 |
| Nb FLOP mul | 13.00 |
| Nb FLOP fma | 4.00 |
| Nb FLOP div | 3.00 |
| Nb FLOP rcp | 0.00 |
| Nb FLOP sqrt | 0.00 |
| Nb FLOP rsqrt | 0.00 |
| Bytes/cycle | 13.26 |
| Bytes prefetched | 0.00 |
| Bytes loaded | 228.00 |
| Bytes stored | 24.00 |
| Stride 0 | NA |
| Stride 1 | NA |
| Stride n | NA |
| Stride unknown | NA |
| Stride indirect | NA |
| Vectorization ratio all | 14.04 |
| Vectorization ratio load | 0.00 |
| Vectorization ratio store | 0.00 |
| Vectorization ratio mul | 0.00 |
| Vectorization ratio add_sub | 0.00 |
| Vectorization ratio fma | 0.00 |
| Vectorization ratio div_sqrt | 0.00 |
| Vectorization ratio other | 44.44 |
| Vector-efficiency ratio all | 14.14 |
| Vector-efficiency ratio load | 12.02 |
| Vector-efficiency ratio store | 12.50 |
| Vector-efficiency ratio mul | 12.50 |
| Vector-efficiency ratio add_sub | 12.50 |
| Vector-efficiency ratio fma | 12.50 |
| Vector-efficiency ratio div_sqrt | 12.50 |
| Vector-efficiency ratio other | 17.71 |
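The projected speedups are simply ratios of the cycle estimates in the same table:

```
1.15 ≈ 19.00 / 16.50   (no scalar integer)
2.82 ≈ 19.00 / 6.73    (FP arith vectorized)
3.17 ≈ 19.00 / 6.00    (fully vectorized)
```

so the FP-vectorization headroom dominates, while removing only the scalar integer address arithmetic buys about 15%.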
| Path / |
| Function | __advec_cell_kernel_module_MOD_advec_cell_kernel._omp_fn.0 |
| Source file and lines | advec_cell_kernel.f90:202-246 |
| Module | exec |
| nb instructions | 106 |
| nb uops | 110 |
| loop length | 534 |
| used x86 registers | 15 |
| used mmx registers | 0 |
| used xmm registers | 16 |
| used ymm registers | 0 |
| used zmm registers | 0 |
| nb stack references | 14 |
| ADD-SUB / MUL ratio | 0.62 |
| micro-operation queue | 19.00 cycles |
| front end | 19.00 cycles |
| | P0 | P1 | P2 | P3 | P4 | P5 | P6 | P7 | P8 | P9 | P10 | P11 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| uops | 18.67 | 18.67 | 10.00 | 10.00 | 1.50 | 18.67 | 9.00 | 1.50 | 1.50 | 1.50 | 9.00 | 10.00 |
| cycles | 18.67 | 18.67 | 10.00 | 10.00 | 1.50 | 18.67 | 9.00 | 1.50 | 1.50 | 1.50 | 9.00 | 10.00 |
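At 110 uops per iteration, the 19.00-cycle micro-operation-queue bound works out to 110 / 19 ≈ 5.8 uops issued per cycle, i.e. the loop saturates the front end (assuming a 6-wide issue stage, an inference from these numbers rather than tool output). The busiest execution ports (P0/P1/P5 at 18.67 cycles) sit just under that bound, which is why "CQA speedup if next bottleneck killed" is only 19.00 / 18.67 ≈ 1.02.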
| Cycles executing div or sqrt instructions | 12.00 |
| FE+BE cycles | 22.02-22.51 |
| Stall cycles | 2.17-2.66 |
| ROB full (events) | 2.71-3.25 |
| Front-end | 19.00 |
| Dispatch | 18.67 |
| DIV/SQRT | 12.00 |
| Overall L1 | 19.00 |
| all | 0% |
| load | 0% |
| store | NA (no store vectorizable/vectorized instructions) |
| mul | NA (no mul vectorizable/vectorized instructions) |
| add-sub | NA (no add-sub vectorizable/vectorized instructions) |
| fma | NA (no fma vectorizable/vectorized instructions) |
| other | 0% |
| all | 14% |
| load | 0% |
| store | 0% |
| mul | 0% |
| add-sub | 0% |
| fma | 0% |
| div/sqrt | 0% |
| other | 47% |
| all | 14% |
| load | 0% |
| store | 0% |
| mul | 0% |
| add-sub | 0% |
| fma | 0% |
| div/sqrt | 0% |
| other | 44% |
| all | 6% |
| load | 6% |
| store | NA (no store vectorizable/vectorized instructions) |
| mul | NA (no mul vectorizable/vectorized instructions) |
| add-sub | NA (no add-sub vectorizable/vectorized instructions) |
| fma | NA (no fma vectorizable/vectorized instructions) |
| other | 6% |
| all | 14% |
| load | 12% |
| store | 12% |
| mul | 12% |
| add-sub | 12% |
| fma | 12% |
| div/sqrt | 12% |
| other | 18% |
| all | 14% |
| load | 12% |
| store | 12% |
| mul | 12% |
| add-sub | 12% |
| fma | 12% |
| div/sqrt | 12% |
| other | 17% |
| Instruction | Nb FU | P0 | P1 | P2 | P3 | P4 | P5 | P6 | P7 | P8 | P9 | P10 | P11 | Latency | Recip. throughput |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| MOV 0x120(%RSP),%R12 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
| VMOVSD (%R12,%RAX,8),%XMM11 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
| VCOMISD %XMM3,%XMM11 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
| JBE 41f610 <__advec_cell_kernel_module_MOD_advec_cell_kernel._omp_fn.0+0x2600> | 1 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 |
| MOV 0x108(%RSP),%RSI | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
| MOVSXD 0xd8(%RSP),%RDI | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
| MOV 0x130(%RSP),%R11 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
| MOV %RSI,%RDX | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.17 |
| MOV 0x138(%RSP),%RCX | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
| MOV 0x100(%RSP),%R8 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
| VANDPD %XMM5,%XMM11,%XMM2 | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.33 |
| MOV 0x128(%RSP),%R12 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
| VMOVSD (%R13),%XMM10 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
| IMUL %RDX,%RCX | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
| ADD %R12,%RSI | 1 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0 | 1 | 0.20 |
| MOV %R15,%R12 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.17 |
| IMUL %RDI,%R12 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
| ADD %R8,%RCX | 1 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0 | 1 | 0.20 |
| MOV 0xe8(%RSP),%R8 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
| ADD %RAX,%RCX | 1 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0 | 1 | 0.20 |
| VDIVSD (%R8,%RSI,8),%XMM10,%XMM12 | 1 | 1 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 13-15 | 4 |
| MOV %R15,%RSI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.17 |
| MOV 0xf0(%RSP),%R8 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
| VDIVSD (%R9,%RCX,8),%XMM2,%XMM9 | 1 | 1 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 13-15 | 4 |
| VADDSD %XMM4,%XMM9,%XMM13 | 1 | 0 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
| IMUL %RDX,%RSI | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
| VMULSD %XMM13,%XMM12,%XMM10 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VSUBSD %XMM9,%XMM8,%XMM13 | 1 | 0 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
| ADD %R8,%RSI | 1 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0 | 1 | 0.20 |
| ADD %RAX,%R8 | 1 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0 | 1 | 0.20 |
| ADD %RAX,%RSI | 1 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0 | 1 | 0.20 |
| ADD %R8,%R12 | 1 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0 | 1 | 0.20 |
| VMOVSD (%R10,%RSI,8),%XMM15 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
| VSUBSD (%R10,%R12,8),%XMM15,%XMM0 | 1 | 0 | 0.50 | 0.33 | 0.33 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.33 | 3 | 0.50 |
| MOV %R15,%R12 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.17 |
| IMUL %R11,%R12 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
| ADD %R12,%R8 | 1 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0 | 1 | 0.20 |
| VMOVSD (%R10,%R8,8),%XMM14 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
| VSUBSD %XMM15,%XMM14,%XMM1 | 1 | 0 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
| VMULSD %XMM0,%XMM1,%XMM14 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VCMPSD $0x6,%XMM3,%XMM1,%XMM2 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VBLENDVPD %XMM2,%XMM4,%XMM6,%XMM12 | 3 | 1 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 2-3 | 1 |
| VCOMISD %XMM3,%XMM14 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
| JBE 41d932 <__advec_cell_kernel_module_MOD_advec_cell_kernel._omp_fn.0+0x922> | 1 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 |
| VANDPD %XMM5,%XMM1,%XMM1 | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.33 |
| VANDPD %XMM5,%XMM0,%XMM0 | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.33 |
| VSUBSD %XMM9,%XMM4,%XMM9 | 1 | 0 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
| VMULSD %XMM13,%XMM1,%XMM2 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VMINSD %XMM1,%XMM0,%XMM14 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VFMADD132SD %XMM10,%XMM2,%XMM0 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VMULSD %XMM7,%XMM0,%XMM1 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VMINSD %XMM14,%XMM1,%XMM0 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VMULSD %XMM9,%XMM0,%XMM14 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VFMADD231SD %XMM12,%XMM14,%XMM15 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VMULSD %XMM11,%XMM15,%XMM1 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| MOV 0x118(%RSP),%R8 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
| IMUL %R14,%RDX | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
| IMUL %R14,%RDI | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
| IMUL %R14,%R11 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
| VMOVSD %XMM1,(%R8,%RAX,8) | 1 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0.50 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 |
| VMOVSD (%R9,%RCX,8),%XMM12 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
| MOV 0xf8(%RSP),%RCX | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
| VMOVSD (%R10,%RSI,8),%XMM15 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
| LEA (%RCX,%RAX,1),%RSI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
| ADD %RSI,%RDX | 1 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0 | 1 | 0.20 |
| ADD %RSI,%RDI | 1 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0 | 1 | 0.20 |
| ADD %R11,%RSI | 1 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0 | 1 | 0.20 |
| VMOVSD (%RBX,%RDX,8),%XMM9 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
| VMOVSD (%RBX,%RSI,8),%XMM11 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
| VSUBSD %XMM9,%XMM11,%XMM0 | 1 | 0 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
| VSUBSD (%RBX,%RDI,8),%XMM9,%XMM2 | 1 | 0 | 0.50 | 0.33 | 0.33 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.33 | 3 | 0.50 |
| VCMPSD $0x6,%XMM3,%XMM0,%XMM14 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VBLENDVPD %XMM14,%XMM4,%XMM6,%XMM11 | 3 | 1 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 2-3 | 1 |
| VMULSD %XMM2,%XMM0,%XMM14 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VCOMISD %XMM3,%XMM14 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
| JBE 41f5e0 <__advec_cell_kernel_module_MOD_advec_cell_kernel._omp_fn.0+0x25d0> | 1 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 |
| VMULSD %XMM15,%XMM12,%XMM15 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VANDPD %XMM5,%XMM0,%XMM0 | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.33 |
| VANDPD %XMM5,%XMM2,%XMM2 | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.33 |
| MOV 0x110(%RSP),%R11 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
| VMULSD %XMM13,%XMM0,%XMM13 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VMINSD %XMM0,%XMM2,%XMM14 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VFMADD132SD %XMM10,%XMM13,%XMM2 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VANDPD %XMM5,%XMM1,%XMM10 | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.33 |
| VDIVSD %XMM15,%XMM10,%XMM12 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 13-15 | 4 |
| VMULSD %XMM7,%XMM2,%XMM2 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VMINSD %XMM14,%XMM2,%XMM14 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VSUBSD %XMM12,%XMM4,%XMM0 | 1 | 0 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
| VMULSD %XMM14,%XMM0,%XMM13 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VFMADD132SD %XMM11,%XMM9,%XMM13 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VMULSD %XMM1,%XMM13,%XMM1 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VMOVSD %XMM1,(%R11,%RAX,8) | 1 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0.50 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 |
| INC %RAX | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.17 |
| CMP %EAX,0xe0(%RSP) | 1 | 0.20 | 0.20 | 0.33 | 0.33 | 0 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0.33 | 1 | 0.33 |
| JGE 41d828 <__advec_cell_kernel_module_MOD_advec_cell_kernel._omp_fn.0+0x818> | 1 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 |
| VMULSD %XMM1,%XMM9,%XMM10 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| MOV 0x110(%RSP),%RDI | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
| VMOVSD %XMM10,(%RDI,%RAX,8) | 1 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0.50 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 |
| INC %RAX | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.17 |
| CMP %EAX,0xe0(%RSP) | 1 | 0.20 | 0.20 | 0.33 | 0.33 | 0 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0.33 | 1 | 0.33 |
| JGE 41d828 <__advec_cell_kernel_module_MOD_advec_cell_kernel._omp_fn.0+0x818> | 1 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 |
| MOV 0xb8(%RSP),%RDI | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
| MOV 0x108(%RSP),%R11 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
| MOV 0x130(%RSP),%RDX | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
| MOV %RDI,%RSI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.17 |
| JMP 41d85b <__advec_cell_kernel_module_MOD_advec_cell_kernel._omp_fn.0+0x84b> | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2.08 |
