from .metricset import MetricSet, Metric

+k_GE_desc = (
+    "The overall quality of the parallelisation. This is the product of the "
+    "Parallel Efficiency and the Computation Scaling."
+)
+k_PE_desc = (
+    "The overall efficiency with which the computation is parallelised between "
+    "different processes and threads. This is further divided into the Process "
+    "Level Efficiency and the Thread Level Efficiency."
+)
+k_PLE_desc = (
+    "The efficiency of the application as viewed at the process level, including the "
+    "MPI communication and process-level load balance."
+)
+k_MPILB_desc = (
+    "The efficiency with which the total amount of computational work is shared "
+    "between the different MPI processes. Low values indicate that there is "
+    "significant imbalance between the most and least loaded processes."
+)
+k_MPICE_desc = (
+    "The efficiency with which the application carries out MPI communication. An ideal "
+    "application will spend no time communicating and 100% of its time in computation. "
+    "Low values indicate that too much communication is being performed for the amount "
+    "of computation."
+)
+k_MPITE_desc = (
+    "The efficiency of the actual transfer of data via MPI. This reflects the size of "
+    "the data being communicated and the speed of the underlying communication network. "
+    "Low values indicate that the network bandwidth is insufficient for the required "
+    "communication rate, or that too much data is being communicated."
+)
+k_MPISE_desc = (
+    "The efficiency with which the MPI communications are synchronised and carried out. "
+    "Low values indicate that there is significant irregularity in the timings of "
+    "different processes' arrivals at MPI calls, reducing efficiency due to waiting "
+    "for MPI calls to complete."
+)
+k_COMPSC_desc = (
+    "The way in which the total computational cost varies with the applied parallelism. "
+    "This is a combination of the increased cost due to additional calculations "
+    "performed and the increased cost due to reduced instructions per cycle."
+)
+k_INSSC_desc = (
+    "Inefficiencies introduced due to an increase in the total computational work done, "
+    "measured by the total CPU instructions. Ideally, there would be no additional "
+    "computation required when parallelising, but there is normally some additional "
+    "cost to manage the distribution of work. The Instruction Scaling metric "
+    "represents this by calculating the relative difference in total instructions "
+    "between runs."
+)
+k_IPCSC_desc = (
+    "Inefficiencies due to changes in the instructions per cycle executed by the CPUs. "
+    "The IPC rate can be reduced due to CPU data starvation, inefficient cache usage, "
+    "or high rates of branch misprediction."
+)
+k_FREQSC_desc = (
+    "Inefficiencies due to changes in the rate at which the CPU executes instructions. "
+    "This is typically due to thermal management in the CPU reducing the overall clock "
+    "speed."
+)
+
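The descriptions above encode a hierarchy: each level-0 or level-1 metric is explained in terms of the finer-grained metrics beneath it. As a rough illustration, here is a minimal sketch with made-up efficiency values, assuming the multiplicative POP-style relations that the "product of" wording suggests (the additive variant implemented in this module may combine sub-metrics differently):

# Illustrative only -- example values, not output from any real trace.
mpi_transfer_eff = 0.95        # MPI Transfer Efficiency (level 3)
mpi_serialisation_eff = 0.90   # MPI Serialisation Efficiency (level 3)
mpi_comm_eff = mpi_transfer_eff * mpi_serialisation_eff   # level 2

mpi_load_balance = 0.92        # MPI Load Balance (level 2)
parallel_eff = mpi_load_balance * mpi_comm_eff            # level 1

computation_scaling = 0.97     # Computation Scaling (level 1)
# Level 0: "the product of the Parallel Efficiency and the Computation Scaling"
global_eff = parallel_eff * computation_scaling

print(f"Global Efficiency ~ {global_eff:.3f}")  # ~0.763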

class MPI_Metrics(MetricSet):
    """Pure MPI Metrics (additive version)."""

    _metric_list = [
-        Metric("Global Efficiency", 0),
-        Metric("Parallel Efficiency", 1),
-        Metric("MPI Load Balance", 2, "Load balance"),
-        Metric("MPI Communication Efficiency", 2),
-        Metric("MPI Transfer Efficiency", 3),
-        Metric("MPI Serialisation Efficiency", 3),
-        Metric("Computation Scaling", 1),
-        Metric("Instruction Scaling", 2),
-        Metric("IPC Scaling", 2),
-        Metric("Frequency Scaling", 2),
+        Metric("Global Efficiency", 0, desc=k_GE_desc),
+        Metric("Parallel Efficiency", 1, desc=k_PE_desc),
+        Metric("MPI Load Balance", 2, "Load balance", desc=k_MPILB_desc),
+        Metric("MPI Communication Efficiency", 2, desc=k_MPICE_desc),
+        Metric("MPI Transfer Efficiency", 3, desc=k_MPITE_desc),
+        Metric("MPI Serialisation Efficiency", 3, desc=k_MPISE_desc),
+        Metric("Computation Scaling", 1, desc=k_COMPSC_desc),
+        Metric("Instruction Scaling", 2, desc=k_INSSC_desc),
+        Metric("IPC Scaling", 2, desc=k_IPCSC_desc),
+        Metric("Frequency Scaling", 2, desc=k_FREQSC_desc),
    ]

    _programming_model = "MPI"
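For reference, a hypothetical usage sketch: it walks the class's _metric_list and prints an indented tree using the level numbers, which is how the descriptions become visible to a user. The import path and the Metric attribute names (key, level, description) are guesses based on this diff, not confirmed API:

# Hypothetical sketch -- the attribute names `key`, `level` and `description`
# are assumptions about how Metric stores its constructor arguments, and the
# module path below is assumed rather than taken from this diff.
from pypop.metrics.mpi_metrics import MPI_Metrics

for metric in MPI_Metrics._metric_list:
    indent = "    " * metric.level
    print(f"{indent}{metric.key}")
    print(f"{indent}  {metric.description}")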