pomerol 2.2
mpi_skel.hpp
//
// This file is part of pomerol, an exact diagonalization library aimed at
// solving condensed matter models of interacting fermions.
//
// Copyright (C) 2016-2026 A. Antipov, I. Krivenko and contributors
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef POMEROL_INCLUDE_MPI_DISPATCHER_MPI_SKEL_HPP
#define POMEROL_INCLUDE_MPI_DISPATCHER_MPI_SKEL_HPP

#include "misc.hpp"
#include "mpi_dispatcher.hpp"

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <map>
#include <memory>
#include <numeric>
#include <tuple>
#include <vector>

namespace pMPI {

/// Wrapper around a computable object that calls the compute() method of the
/// wrapped object and carries information about the complexity of that call.
template <typename PartType> struct ComputeWrap {
    /// Reference to the wrapped object.
    PartType& x;
    /// Complexity of a call to x.compute().
    int complexity;

    explicit ComputeWrap(PartType& x, int complexity = 1) : x(x), complexity(complexity) {}
    /// Call compute() of the wrapped object x.
    void run() { x.compute(); }
};

/// Wrapper around a computable object that calls the prepare() method of the
/// wrapped object and carries information about the complexity of that call.
template <typename PartType> struct PrepareWrap {
    /// Reference to the wrapped object.
    PartType& x;
    /// Complexity of a call to x.prepare().
    int complexity;

    explicit PrepareWrap(PartType& x, int complexity = 1) : x(x), complexity(complexity) {}
    /// Call prepare() of the wrapped object x.
    void run() { x.prepare(); }
};

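// A PartType only needs the member function named by its wrapper: PrepareWrap
// calls x.prepare() and ComputeWrap calls x.compute(). For illustration, a
// hypothetical part could look like
//
//     struct MyPart {
//         void prepare(); // invoked by PrepareWrap<MyPart>::run()
//         void compute(); // invoked by ComputeWrap<MyPart>::run()
//     };
//
// Both wrappers hold a reference, so the wrapped object must outlive the wrapper;
// the complexity value lets mpi_skel::run() dispatch the heaviest jobs first.
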
/// This structure carries a list of wrappers and uses the mpi_dispatcher mechanism
/// to distribute the wrapped jobs over MPI processes and run them.
template <typename WrapType> struct mpi_skel {
    /// List of wrappers.
    std::vector<WrapType> parts;

    /// Distribute the jobs in parts over the processes of the communicator Comm,
    /// run them, and return a map from job IDs to the workers that executed them.
    std::map<pMPI::JobId, pMPI::WorkerId> run(MPI_Comm const& Comm, bool VerboseOutput = true);
};

template <typename WrapType>
std::map<pMPI::JobId, pMPI::WorkerId> mpi_skel<WrapType>::run(MPI_Comm const& Comm, bool VerboseOutput) {
    int comm_rank = pMPI::rank(Comm);
    int comm_size = pMPI::size(Comm);
    int const root = 0;
    MPI_Barrier(Comm);

    if(comm_rank == root) {
        std::cout << "Calculating " << parts.size() << " jobs using " << comm_size << " procs.\n";
    }

    std::unique_ptr<pMPI::MPIMaster> disp;

    if(comm_rank == root) {
        // prepare one Master on a root process for distributing parts.size() jobs
        std::vector<pMPI::JobId> job_order(parts.size());
        std::iota(job_order.begin(), job_order.end(), 0);

        // dispatch the most complex jobs first
        auto comp1 = [this](std::size_t l, std::size_t r) -> int {
            return (parts[l].complexity > parts[r].complexity);
        };
        std::sort(job_order.begin(), job_order.end(), comp1);
        disp.reset(new pMPI::MPIMaster(Comm, job_order, true));
    }

    MPI_Barrier(Comm);

    // Start calculating data
    for(pMPI::MPIWorker worker(Comm, root); !worker.is_finished();) {
        if(comm_rank == root)
            disp->order();
        worker.receive_order();
        if(worker.is_working()) { // for a specific worker
            JobId p = worker.current_job();
            if(VerboseOutput)
                std::cout << "[" << p + 1 << "/" << parts.size() << "] P" << comm_rank << " : part " << p << " ["
                          << parts[p].complexity << "] run;\n";
            parts[p].run();
            worker.report_job_done();
        }
        if(comm_rank == root)
            disp->check_workers(); // check if there are free workers
    }

    // at this moment all communication is finished
    MPI_Barrier(Comm);
    // Now spread the information about who did what.
    if(VerboseOutput && comm_rank == root)
        std::cout << "done.\n";

    MPI_Barrier(Comm);
    std::map<pMPI::JobId, pMPI::WorkerId> job_map;
    if(comm_rank == root) {
        job_map = disp->DispatchMap;
        long n_jobs = job_map.size();
        std::vector<pMPI::JobId> jobs(n_jobs);
        std::vector<pMPI::WorkerId> workers(n_jobs);

        auto it = job_map.cbegin();
        for(int i = 0; i < n_jobs; ++i, ++it) {
            std::tie(jobs[i], workers[i]) = *it;
        }

        // broadcast the job -> worker assignment to all other processes
        MPI_Bcast(&n_jobs, 1, MPI_LONG, root, Comm);
        MPI_Bcast(jobs.data(), n_jobs, MPI_INT, root, Comm);
        MPI_Bcast(workers.data(), n_jobs, MPI_INT, root, Comm);
    } else {
        long n_jobs;
        MPI_Bcast(&n_jobs, 1, MPI_LONG, root, Comm);
        std::vector<pMPI::JobId> jobs(n_jobs);
        MPI_Bcast(jobs.data(), n_jobs, MPI_INT, root, Comm);
        std::vector<pMPI::WorkerId> workers(n_jobs);
        MPI_Bcast(workers.data(), n_jobs, MPI_INT, root, Comm);
        for(std::size_t i = 0; i < n_jobs; ++i)
            job_map[jobs[i]] = workers[i];
    }
    return job_map;
}

} // namespace pMPI

#endif // #ifndef POMEROL_INCLUDE_MPI_DISPATCHER_MPI_SKEL_HPP
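
A minimal usage sketch (illustrative only; the include path, the Part type and the compute_all() helper are assumptions, not part of the header above). Every rank builds the same list of parts, wraps each one in a ComputeWrap, and calls mpi_skel::run() collectively; the root rank dispatches job IDs by decreasing complexity while the workers execute parts[p].compute().

#include <mpi_dispatcher/mpi_skel.hpp> // assumed install path, matching the include guard

#include <map>
#include <vector>

// Hypothetical computable object: any type with a compute() method works.
struct Part {
    void compute() { /* heavy work for this part */ }
};

// Run all parts in parallel; MPI must already be initialized on every rank.
std::map<pMPI::JobId, pMPI::WorkerId> compute_all(std::vector<Part>& all_parts) {
    pMPI::mpi_skel<pMPI::ComputeWrap<Part>> skel;
    skel.parts.reserve(all_parts.size());
    for(auto& p : all_parts)
        skel.parts.emplace_back(p); // default complexity = 1
    // Collective call: the root distributes jobs, every worker runs its share.
    return skel.run(MPI_COMM_WORLD, /*VerboseOutput=*/true);
}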