LCOV - code coverage report
Current view: top level - src/dbm - dbm_multiply.c (source / functions)
Test:         CP2K Regtests (git:936074a)      Lines:     76.0 % (76 of 100)
Test Date:    2025-12-04 06:27:48              Functions: 85.7 % (6 of 7)

            Line data    Source code
       1              : /*----------------------------------------------------------------------------*/
       2              : /*  CP2K: A general program to perform molecular dynamics simulations         */
       3              : /*  Copyright 2000-2025 CP2K developers group <https://cp2k.org>              */
       4              : /*                                                                            */
       5              : /*  SPDX-License-Identifier: BSD-3-Clause                                     */
       6              : /*----------------------------------------------------------------------------*/
       7              : #include "dbm_multiply.h"
       8              : #include "../offload/offload_mempool.h"
       9              : #include "../offload/offload_runtime.h"
      10              : #include "dbm_hyperparams.h"
      11              : #include "dbm_internal.h"
      12              : #include "dbm_library.h"
      13              : #include "dbm_multiply_comm.h"
      14              : #include "dbm_multiply_cpu.h"
      15              : #include "dbm_multiply_gpu.h"
      16              : 
      17              : #include <assert.h>
      18              : #include <limits.h>
      19              : #include <math.h>
      20              : #include <omp.h>
      21              : #include <stdio.h>
      22              : #include <stdlib.h>
      23              : #include <string.h>
      24              : 
      25              : /*******************************************************************************
      26              :  * \brief Private routine for computing the max filter threshold for each row.
      27              :  * \author Ole Schuett
      28              :  ******************************************************************************/
      29       213252 : static float *compute_rows_max_eps(const bool trans, const dbm_matrix_t *matrix,
      30              :                                    const double filter_eps) {
      31       213252 :   const int nrows = (trans) ? matrix->ncols : matrix->nrows;
      32       213252 :   int *nblocks_per_row = calloc(nrows, sizeof(int));
      33       213252 :   float *row_max_eps = malloc(nrows * sizeof(float));
      34       213252 :   assert((nblocks_per_row != NULL && row_max_eps != NULL) || nrows == 0);
      35              : 
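                       :   // Per-row threshold: (filter_eps / nblocks_in_row)^2. Storing the squared
                       :   // value lets multiply_packs() compare alpha^2 * norm_a * norm_b against it
                       :   // directly, which presumably treats the block norms as squared Frobenius
                       :   // norms (see the norm check in multiply_packs below).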
      36       213252 : #pragma omp parallel
      37              :   {
      38              : #pragma omp for
      39              :     for (int ishard = 0; ishard < dbm_get_num_shards(matrix); ishard++) {
      40              :       dbm_shard_t *shard = &matrix->shards[ishard];
      41              :       for (int iblock = 0; iblock < shard->nblocks; iblock++) {
      42              :         const dbm_block_t *blk = &shard->blocks[iblock];
      43              :         const int row = (trans) ? blk->col : blk->row;
      44              : #pragma omp atomic
      45              :         ++nblocks_per_row[row];
      46              :       }
      47              :     }
      48              : #pragma omp master
      49              :     cp_mpi_sum_int(nblocks_per_row, nrows, matrix->dist->comm);
      50              : #pragma omp barrier
      51              : #pragma omp for
      52              :     for (int i = 0; i < nrows; i++) {
      53              :       const float f =
      54              :           ((float)filter_eps) / ((float)imax(1, nblocks_per_row[i]));
      55              :       row_max_eps[i] = f * f;
      56              :     }
      57              :   } // end of omp parallel region
      58              : 
      59       213252 :   free(nblocks_per_row);
      60       213252 :   return row_max_eps; // Ownership of row_max_eps transfers to caller.
      61              : }
      62              : 
      63              : /*******************************************************************************
      64              :  * \brief Private struct for storing the context of the multiplication backend.
      65              :  * \author Ole Schuett
      66              :  ******************************************************************************/
      67              : typedef struct {
      68              : #if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_DBM)
      69              :   dbm_multiply_gpu_context_t gpu;
      70              : #endif
       71              :   int cpu_options; // Bitwise OR of dbm_multiply_cpu_options (enum) values.
      72              : } backend_context_t;
      73              : 
      74              : /*******************************************************************************
      75              :  * \brief Private routine for initializing the multiplication backend.
      76              :  * \author Ole Schuett
      77              :  ******************************************************************************/
      78       213252 : static backend_context_t *backend_start(const dbm_matrix_t *matrix_c) {
      79       213252 :   backend_context_t *const ctx = calloc(1, sizeof(backend_context_t));
      80              :   // BLAS and LIBXSMM benefit in general from DBM_MULTIPLY_TASK_REORDER.
      81       213252 :   ctx->cpu_options = DBM_MULTIPLY_TASK_REORDER;
      82              : 
      83              : #if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_DBM)
      84              :   dbm_multiply_gpu_start(DBM_MAX_BATCH_SIZE, dbm_get_num_shards(matrix_c),
      85              :                          matrix_c->shards, &ctx->gpu);
      86              : #else
      87       213252 :   (void)matrix_c; // mark as used
      88              : #endif
      89              : 
      90       213252 :   return ctx;
      91              : }
      92              : 
      93              : /*******************************************************************************
      94              :  * \brief Private routine for handing newly arrived packs to the backend.
      95              :  * \author Ole Schuett
      96              :  ******************************************************************************/
      97            0 : static bool backend_upload_packs(const dbm_pack_t *pack_a,
      98              :                                  const dbm_pack_t *pack_b,
      99              :                                  backend_context_t *ctx) {
     100              : #if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_DBM)
     101              :   return dbm_multiply_gpu_upload_packs(pack_a, pack_b, &ctx->gpu);
     102              : #else
     103            0 :   (void)pack_a; // mark as used
     104            0 :   (void)pack_b;
     105            0 :   (void)ctx;
     106            0 :   return false;
     107              : #endif
     108              : }
     109              : 
     110              : /*******************************************************************************
     111              :  * \brief Private routine for sending a batch to the multiplication backend.
     112              :  * \author Ole Schuett
     113              :  ******************************************************************************/
     114       232460 : static void backend_process_batch(const int ntasks,
     115              :                                   const dbm_task_t batch[ntasks],
     116              :                                   const double alpha, const dbm_pack_t *pack_a,
     117              :                                   const dbm_pack_t *pack_b, const int kshard,
     118              :                                   dbm_shard_t *shard_c, const bool finish,
     119              :                                   const bool force_cpu,
     120              :                                   backend_context_t *ctx) {
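                       :   // Dispatch logic: ctx == NULL selects the plain BLAS host path used for
                       :   // validation runs; otherwise the batch goes to the GPU backend unless
                       :   // force_cpu is set or GPU support is compiled out, in which case the CPU
                       :   // kernels run with ctx->cpu_options.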
     121       232460 :   if (NULL != ctx) {
     122              : #if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_DBM)
     123              :     if (!force_cpu) {
     124              :       dbm_multiply_gpu_process_batch(ntasks, batch, alpha, shard_c, kshard,
     125              :                                      finish, &ctx->gpu);
     126              :     } else
     127              : #endif
     128              :     {
     129       232460 :       (void)kshard;
     130       232460 :       (void)finish;
     131       232460 :       (void)force_cpu;
     132       232460 :       dbm_multiply_cpu_process_batch(ntasks, batch, alpha, pack_a, pack_b,
     133              :                                      shard_c, ctx->cpu_options);
     134              :     }
     135              :   } else { // Validate against host (aka CPU).
     136            0 :     dbm_multiply_cpu_process_batch(ntasks, batch, alpha, pack_a, pack_b,
     137              :                                    shard_c, DBM_MULTIPLY_BLAS_LIBRARY);
     138              :   }
     139       232460 : }
     140              : 
     141              : /*******************************************************************************
     142              :  * \brief Private routine for shutting down the multiplication backend.
     143              :  * \author Ole Schuett
     144              :  ******************************************************************************/
     145       213252 : static void backend_stop(backend_context_t *ctx) {
     146              : #if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_DBM)
     147              :   dbm_multiply_gpu_stop(&ctx->gpu);
     148              : #endif
     149       213252 :   free(ctx);
     150       213252 : }
     151              : 
     152              : /*******************************************************************************
      153              :  * \brief Private routine for multiplying two packs.
     154              :  * \author Ole Schuett
     155              :  ******************************************************************************/
     156       232386 : static void multiply_packs(const bool transa, const bool transb,
     157              :                            const double alpha, const dbm_pack_t *pack_a,
     158              :                            const dbm_pack_t *pack_b,
     159              :                            const dbm_matrix_t *matrix_a,
     160              :                            const dbm_matrix_t *matrix_b, dbm_matrix_t *matrix_c,
     161              :                            const float *rows_max_eps,
     162              :                            const bool retain_sparsity, const bool force_cpu,
     163              :                            int64_t *flop, backend_context_t *ctx) {
      164              :   // For validation runs (flop == NULL), FLOPs are not counted and ctx is ignored.
     165       232386 :   backend_context_t *const context = (NULL != flop ? ctx : NULL);
     166       232386 :   const float alpha2 = alpha * alpha;
     167       232386 :   int64_t flop_sum = 0;
     168              : 
     169       232386 :   const int nshard_rows = matrix_c->dist->rows.nshards;
     170       232386 :   const int nshard_cols = matrix_c->dist->cols.nshards;
     171       232386 :   int *shard_row_start = calloc(nshard_rows, sizeof(int));
     172       232386 :   int *shard_col_start = calloc(nshard_cols, sizeof(int));
     173       232386 :   assert(NULL != shard_row_start && NULL != shard_col_start);
     174              : 
     175       232386 :   const int *sum_index_sizes_a =
     176              :       (transa) ? matrix_a->row_sizes : matrix_a->col_sizes;
     177       232386 :   const int *sum_index_sizes_b =
     178              :       (transb) ? matrix_b->col_sizes : matrix_b->row_sizes;
     179       232386 :   const int *free_index_sizes_a =
     180              :       (transa) ? matrix_a->col_sizes : matrix_a->row_sizes;
     181       232386 :   const int *free_index_sizes_b =
     182              :       (transb) ? matrix_b->row_sizes : matrix_b->col_sizes;
     183              : 
     184       232386 : #pragma omp parallel reduction(+ : flop_sum)
     185              :   {
      186              :     // Thread-private batch buffer; work is processed in pieces of up to DBM_MAX_BATCH_SIZE tasks.
     187              :     dbm_task_t *batch =
     188              :         offload_mempool_host_malloc(sizeof(dbm_task_t) * DBM_MAX_BATCH_SIZE);
     189              : 
      190              :     // Blocks are ordered first by shard; create lookup tables of the shard boundaries.
     191              : #pragma omp for nowait
     192              :     for (int iblock = 1; iblock < pack_a->nblocks; iblock++) {
     193              :       const int shard_row = pack_a->blocks[iblock].free_index % nshard_rows;
     194              :       const int prev_shard_row =
     195              :           pack_a->blocks[iblock - 1].free_index % nshard_rows;
     196              :       if (prev_shard_row != shard_row) {
     197              :         shard_row_start[shard_row] = iblock;
     198              :       }
     199              :     }
     200              : #pragma omp for
     201              :     for (int jblock = 1; jblock < pack_b->nblocks; jblock++) {
     202              :       const int shard_col = pack_b->blocks[jblock].free_index % nshard_cols;
     203              :       const int prev_shard_col =
     204              :           pack_b->blocks[jblock - 1].free_index % nshard_cols;
     205              :       if (prev_shard_col != shard_col) {
     206              :         shard_col_start[shard_col] = jblock;
     207              :       }
     208              :     }
     209              : 
     210              : #pragma omp for collapse(2) DBM_OMP_SCHEDULE
     211              :     for (int shard_row = 0; shard_row < nshard_rows; shard_row++) {
     212              :       for (int shard_col = 0; shard_col < nshard_cols; shard_col++) {
     213              :         const int ishard = shard_row * nshard_cols + shard_col;
     214              :         dbm_shard_t *const shard_c = &matrix_c->shards[ishard];
     215              :         int ntasks = 0;
     216              : 
     217              :         // Use a merge-join to find pairs of blocks with matching sum indices.
      218              :         // This relies on the blocks within a shard being ordered by sum_index.
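                       :         // Illustrative example (hypothetical indices): for A sum indices
                       :         // [1, 3, 5] and B sum indices [2, 3, 3, 6], B's 2 is skipped by
                       :         // advancing jblock_start, A's 3 is paired with both 3s of B, and the
                       :         // inner loop breaks as soon as B's index exceeds A's.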
     219              :         const int iblock_start = shard_row_start[shard_row];
     220              :         int jblock_start = shard_col_start[shard_col];
     221              :         for (int iblock = iblock_start; iblock < pack_a->nblocks; iblock++) {
     222              :           const dbm_pack_block_t *blk_a = &pack_a->blocks[iblock];
     223              :           if (blk_a->free_index % nshard_rows != shard_row) {
     224              :             break;
     225              :           }
     226              :           for (int jblock = jblock_start; jblock < pack_b->nblocks; jblock++) {
     227              :             const dbm_pack_block_t *blk_b = &pack_b->blocks[jblock];
     228              :             if (blk_b->free_index % nshard_cols != shard_col) {
     229              :               jblock = pack_b->nblocks; // break
     230              :               continue;
     231              :             }
     232              :             if (blk_a->sum_index < blk_b->sum_index) {
     233              :               jblock = pack_b->nblocks; // break
     234              :               continue;
     235              :             }
     236              :             if (blk_a->sum_index > blk_b->sum_index) {
     237              :               jblock_start++;
     238              :               continue;
     239              :             }
     240              :             // Found block pair with blk_a->sum_index == blk_b->sum_index.
     241              : 
     242              :             // Check norms.
     243              :             const float result_norm = alpha2 * blk_a->norm * blk_b->norm;
     244              :             if (result_norm < rows_max_eps[blk_a->free_index]) {
     245              :               continue;
     246              :             }
     247              : 
     248              :             // Check block sizes.
     249              :             const int m = free_index_sizes_a[blk_a->free_index];
     250              :             const int n = free_index_sizes_b[blk_b->free_index];
     251              :             const int k = sum_index_sizes_a[blk_a->sum_index];
     252              :             assert(m == matrix_c->row_sizes[blk_a->free_index]);
     253              :             assert(n == matrix_c->col_sizes[blk_b->free_index]);
     254              :             assert(k == sum_index_sizes_b[blk_b->sum_index]);
     255              : 
     256              :             // Get C block.
     257              :             const int row = blk_a->free_index, col = blk_b->free_index;
     258              :             dbm_block_t *blk_c = dbm_shard_lookup(shard_c, row, col);
     259              :             if (blk_c == NULL && retain_sparsity) {
     260              :               continue;
     261              :             } else if (blk_c == NULL) {
     262              :               assert(dbm_get_shard_index(matrix_c, row, col) == ishard);
     263              :               assert(dbm_get_stored_coordinates(matrix_c, row, col) ==
     264              :                      matrix_c->dist->my_rank);
     265              :               blk_c = dbm_shard_promise_new_block(shard_c, row, col, m * n);
     266              :             }
     267              : 
     268              :             // Count flops.
     269              :             const int64_t task_flops = 2LL * m * n * k;
     270              :             if (task_flops == 0) {
     271              :               continue;
     272              :             }
     273              :             flop_sum += task_flops;
     274              :             dbm_library_counter_increment(m, n, k);
     275              : 
     276              :             // Add block multiplication to batch.
     277              :             batch[ntasks].m = m;
     278              :             batch[ntasks].n = n;
     279              :             batch[ntasks].k = k;
     280              :             batch[ntasks].offset_a = blk_a->offset;
     281              :             batch[ntasks].offset_b = blk_b->offset;
     282              :             batch[ntasks].offset_c = blk_c->offset;
     283              :             ++ntasks;
     284              : 
     285              :             if (ntasks == DBM_MAX_BATCH_SIZE) {
     286              :               backend_process_batch(ntasks, batch, alpha, pack_a, pack_b,
     287              :                                     ishard, shard_c, false, force_cpu, context);
     288              :               ntasks = 0;
     289              :             }
     290              :           }
     291              :         }
     292              :         backend_process_batch(ntasks, batch, alpha, pack_a, pack_b, ishard,
     293              :                               shard_c, true, force_cpu, context);
     294              :       }
     295              :     }
     296              : 
     297              :     offload_mempool_host_free(batch);
     298              :   }
     299              : 
     300       232386 :   free(shard_row_start);
     301       232386 :   free(shard_col_start);
     302              : 
     303       232386 :   if (NULL != flop) {
     304       232386 :     *flop += flop_sum;
     305              :   }
     306       232386 : }
     307              : 
     308              : /*******************************************************************************
     309              :  * \brief Performs a multiplication of two dbm_matrix_t matrices.
     310              :  *        See dbm_matrix.h for details.
     311              :  * \author Ole Schuett
     312              :  ******************************************************************************/
     313       213252 : void dbm_multiply(const bool transa, const bool transb, const double alpha,
     314              :                   const dbm_matrix_t *matrix_a, const dbm_matrix_t *matrix_b,
     315              :                   const double beta, dbm_matrix_t *matrix_c,
     316              :                   const bool retain_sparsity, const double filter_eps,
     317              :                   int64_t *flop) {
     318       213252 :   assert(omp_get_num_threads() == 1);
     319       213252 :   assert(matrix_a != NULL && matrix_b != NULL && matrix_c != NULL);
     320              : 
     321              :   // Throughout the matrix multiplication code the "sum_index" and "free_index"
     322              :   // denote the summation (aka dummy) and free index from the Einstein notation.
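                       :   // Example: for C = A^T * B (transa == true, transb == false), A's rows are
                       :   // the summation index and A's columns the free index, while B's rows are
                       :   // the summation index and B's columns the free index.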
     323       213252 :   const int num_sum_index_a = (transa) ? matrix_a->nrows : matrix_a->ncols;
     324       213252 :   const int num_sum_index_b = (transb) ? matrix_b->ncols : matrix_b->nrows;
     325       213252 :   const int num_free_index_a = (transa) ? matrix_a->ncols : matrix_a->nrows;
     326       213252 :   const int num_free_index_b = (transb) ? matrix_b->nrows : matrix_b->ncols;
     327              : 
     328              :   // Sanity check matrix dimensions.
     329       213252 :   assert(num_sum_index_a == num_sum_index_b);
     330       213252 :   assert(num_free_index_a == matrix_c->nrows);
     331       213252 :   assert(num_free_index_b == matrix_c->ncols);
     332              : 
     333              :   // Prepare matrix_c (host).
     334       213252 :   dbm_scale(matrix_c, beta);
     335              : 
     336              :   // Determine if validation shall be performed.
     337       213252 :   const char *const maxeps_env = getenv("DBM_MULTIPLY_MAXEPS");
     338       213252 :   const char *const verify_env = getenv("DBM_MULTIPLY_VERIFY");
     339       213252 :   const double maxeps = (NULL == maxeps_env ? 1E-1 : fabs(atof(maxeps_env)));
     340       426504 :   const int verify =
     341       213252 :       (NULL == verify_env ? (NULL == maxeps_env ? 0 : 1) : atoi(verify_env));
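                       :   // Setting DBM_MULTIPLY_MAXEPS alone implies verify == 1 (warn only); any
                       :   // other non-zero DBM_MULTIPLY_VERIFY aborts if the result deviates from a
                       :   // CPU-only reference multiplication (matrix_d below) by more than maxeps.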
     342       213252 :   dbm_matrix_t *matrix_d = NULL;
     343       213252 :   if (0 != verify) {
     344            0 :     dbm_distribution_t *const dist_shared = matrix_c->dist;
     345            0 :     dbm_create(&matrix_d, dist_shared, matrix_c->name, matrix_c->nrows,
     346            0 :                matrix_c->ncols, matrix_c->row_sizes, matrix_c->col_sizes);
     347            0 :     dbm_copy(matrix_d, matrix_c);
     348              :   }
     349              : 
     350              :   // Compute filter thresholds for each row.
     351       213252 :   float *rows_max_eps = compute_rows_max_eps(transa, matrix_a, filter_eps);
     352              : 
     353              :   // Start uploading matrix_c to the GPU.
     354       213252 :   backend_context_t *ctx = backend_start(matrix_c);
     355              : 
     356              :   // Redistribute matrix_a and matrix_b across MPI ranks.
     357       213252 :   dbm_comm_iterator_t *iter =
     358       213252 :       dbm_comm_iterator_start(transa, transb, matrix_a, matrix_b, matrix_c);
     359              : 
     360              :   // Count flops if requested.
     361       213252 :   if (NULL != flop) {
     362       213252 :     *flop = 0;
     363              :   }
     364              : 
     365              :   // Main loop.
     366              :   dbm_pack_t *pack_a, *pack_b;
     367       445638 :   while (dbm_comm_iterator_next(iter, &pack_a, &pack_b)) {
     368       232386 :     const bool uploaded = backend_upload_packs(pack_a, pack_b, ctx);
     369       232386 :     (void)uploaded; // mark used
     370       232386 :     multiply_packs(transa, transb, alpha, pack_a, pack_b, matrix_a, matrix_b,
     371              :                    matrix_c, rows_max_eps, retain_sparsity, false /*!uploaded*/,
     372              :                    flop, ctx);
     373              :   }
     374              : 
      375              :   // Wait for all other MPI ranks to complete, then release resources.
     376       213252 :   dbm_comm_iterator_stop(iter);
     377       213252 :   backend_stop(ctx);
     378              : 
     379       213252 :   if (NULL != matrix_d) {
     380            0 :     ctx = backend_start(matrix_d);
     381            0 :     iter =
     382            0 :         dbm_comm_iterator_start(transa, transb, matrix_a, matrix_b, matrix_d);
     383            0 :     while (dbm_comm_iterator_next(iter, &pack_a, &pack_b)) {
     384            0 :       multiply_packs(transa, transb, alpha, pack_a, pack_b, matrix_a, matrix_b,
     385              :                      matrix_d, rows_max_eps, retain_sparsity, true, NULL, ctx);
     386              :     }
     387            0 :     dbm_comm_iterator_stop(iter);
     388            0 :     backend_stop(ctx);
     389            0 :     const double epsilon = dbm_maxeps(matrix_d, matrix_c);
     390            0 :     if (maxeps < epsilon) {
     391            0 :       if (1 == verify) {
     392            0 :         fprintf(stderr, "WARN ACC/LIBDBM: diff=%g\n", epsilon);
     393              :       } else {
     394            0 :         fprintf(stderr, "ERROR ACC/LIBDBM: diff=%g\n", epsilon);
     395            0 :         exit(EXIT_FAILURE);
     396              :       }
     397              :     }
     398            0 :     dbm_release(matrix_d);
     399              :   }
     400              : 
     401              :   // Release filter thresholds.
     402       213252 :   free(rows_max_eps);
     403              : 
     404              :   // Final filter pass.
     405       213252 :   dbm_filter(matrix_c, filter_eps);
     406       213252 : }
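                       : 
                       : /* Illustrative usage sketch (not part of this file): assuming matrices a, b,
                       :  * and c (hypothetical names) were created via dbm_create with a compatible
                       :  * distribution and block sizes, a product C := 1.0 * A * B that keeps C's
                       :  * sparsity pattern and filters blocks below 1e-10 could look like:
                       :  *
                       :  *   int64_t flop = 0;
                       :  *   dbm_multiply(false, false, 1.0, a, b, 0.0, c,
                       :  *                true, 1e-10, &flop);
                       :  */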
     407              : 
     408              : // EOF
        

Generated by: LCOV version 2.0-1