Many parallel and distributed message-passing programs are written parametrically over the available resources, in particular the number of nodes and their topologies, so that a single program can scale across different environments. This paper presents Pabble, a parameterised protocol description language which can guarantee safety and progress in a large class of practical, complex parameterised message-passing programs through static checking.
Pabble can describe an overall interaction topology, using a concise and expressive notation, designed for a variable number of participants arranged in multiple dimensions. From these parameterised protocols we automatically generate local protocols, which are used to type-check parameterised MPI programs for communication safety and deadlock freedom. Despite the undecidability of endpoint projection and type checking in the underlying parameterised session type theory, our method guarantees that both endpoint projection and type checking terminate.
An extended version was published in the journal Service Oriented Computing and Applications (SOCA).
global protocol NBody(role Worker[0..N]) {
  rec LOOP {
    (float) from Worker[i:0..N-1] to Worker[i+1];
    (float) from Worker[N] to Worker[0];
    /* Calculate Velocity */
    continue LOOP;
  }
  /* Calculate Step */
}
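For intuition, the following is a minimal MPI (C) sketch of a program with the NBody communication shape: each process exchanges a buffer with its ring neighbours once per iteration. The single-float payload, the iteration count and the omitted force computation are placeholders chosen for illustration, not details fixed by the protocol.

  /* Minimal sketch of the NBody ring exchange (illustrative only). */
  #include <mpi.h>

  int main(int argc, char **argv) {
      int rank, size;
      MPI_Init(&argc, &argv);
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      MPI_Comm_size(MPI_COMM_WORLD, &size);

      int right = (rank + 1) % size;        /* Worker[i+1], wrapping N -> 0 */
      int left  = (rank - 1 + size) % size; /* Worker[i-1]                  */
      float buf = (float)rank;              /* placeholder particle data    */

      for (int step = 0; step < size; step++) {   /* rec LOOP */
          float incoming;
          /* (float) from Worker[i] to Worker[i+1]; a combined send/receive
           * avoids every rank blocking on MPI_Send at the same time.       */
          MPI_Sendrecv(&buf, 1, MPI_FLOAT, right, 0,
                       &incoming, 1, MPI_FLOAT, left, 0,
                       MPI_COMM_WORLD, MPI_STATUS_IGNORE);
          buf = incoming;
          /* Calculate velocity contribution from the received data */
      }
      /* Calculate step */
      MPI_Finalize();
      return 0;
  }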
global protocol LoadBalancing(role Worker[0..N]) {
  rec REPEAT {
    oneof (Worker[i in 1..N]) {
      request() from Worker[i] to Worker[0];
      choice at Worker[0] {
        finish() from Worker[0] to Worker[i];
        foreach (x:1..N except i) {
          request() from Worker[x] to Worker[0];
          finish() from Worker[0] to Worker[x];
        }
      } or {
        reply() from Worker[0] to Worker[i];
        continue REPEAT;
      }
    }
  }
}
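A matching master/worker MPI program could be organised roughly as below. The message tags (TAG_REQUEST, TAG_REPLY, TAG_FINISH) and the fixed pool of ten tasks are assumptions made for this sketch; the protocol itself only fixes the request/reply/finish message pattern.

  /* Hypothetical master/worker sketch of LoadBalancing (illustrative only). */
  #include <mpi.h>

  #define TAG_REQUEST 1
  #define TAG_REPLY   2
  #define TAG_FINISH  3

  int main(int argc, char **argv) {
      int rank, size;
      MPI_Init(&argc, &argv);
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      MPI_Comm_size(MPI_COMM_WORLD, &size);

      int tasks_left = 10;                   /* assumed amount of work */

      if (rank == 0) {                       /* Worker[0]: the master  */
          int finished = 0;
          while (finished < size - 1) {
              int dummy;
              MPI_Status st;
              /* request() from Worker[i] to Worker[0] (any i)         */
              MPI_Recv(&dummy, 1, MPI_INT, MPI_ANY_SOURCE, TAG_REQUEST,
                       MPI_COMM_WORLD, &st);
              if (tasks_left > 0) {          /* choice at Worker[0]: reply  */
                  MPI_Send(&tasks_left, 1, MPI_INT, st.MPI_SOURCE, TAG_REPLY,
                           MPI_COMM_WORLD);  /* payload is a placeholder    */
                  tasks_left--;
              } else {                       /* choice at Worker[0]: finish */
                  MPI_Send(&tasks_left, 1, MPI_INT, st.MPI_SOURCE, TAG_FINISH,
                           MPI_COMM_WORLD);
                  finished++;
              }
          }
      } else {                               /* Worker[1..N] */
          while (1) {
              int req = rank, task;
              MPI_Status st;
              MPI_Send(&req, 1, MPI_INT, 0, TAG_REQUEST, MPI_COMM_WORLD);
              MPI_Recv(&task, 1, MPI_INT, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &st);
              if (st.MPI_TAG == TAG_FINISH) break;
              /* TAG_REPLY: process the task, then continue REPEAT */
          }
      }
      MPI_Finalize();
      return 0;
  }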
global protocol DenseMatVec(role Worker[0..N]) {
  // Scatter Matrix A
  foreach (i:1..N) {
    LBound(int) from Worker[0] to Worker[i];
    UBound(int) from Worker[0] to Worker[i];
    Data(double) from Worker[0] to Worker[i];
  }
  // Scatter Vector B
  (double) from Worker[0] to Worker[1..N];
  // --- Perform calculation ---
  // Gather data
  (double) from Worker[1..N] to Worker[0];
}
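A plausible MPI rendering of this protocol is sketched below: rank 0 sends each worker its row bounds and row block, broadcasts the vector, and collects the partial results. The matrix size NROWS, the block partitioning and the omitted local multiplication are illustrative assumptions, not prescribed by the protocol.

  /* Illustrative MPI shape of DenseMatVec. */
  #include <mpi.h>

  #define NROWS 8

  int main(int argc, char **argv) {
      int rank, size;
      MPI_Init(&argc, &argv);
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      MPI_Comm_size(MPI_COMM_WORLD, &size);

      double A[NROWS][NROWS] = {{0}}, B[NROWS] = {0}, partial[NROWS] = {0};

      if (rank == 0) {
          /* Scatter matrix A: LBound, UBound, Data to each Worker[i] */
          for (int i = 1; i < size; i++) {
              int lb = (i - 1) * NROWS / (size - 1);
              int ub = i * NROWS / (size - 1);
              MPI_Send(&lb, 1, MPI_INT, i, 0, MPI_COMM_WORLD);
              MPI_Send(&ub, 1, MPI_INT, i, 0, MPI_COMM_WORLD);
              MPI_Send(&A[lb][0], (ub - lb) * NROWS, MPI_DOUBLE, i, 0,
                       MPI_COMM_WORLD);
          }
      } else {
          int lb, ub;
          MPI_Recv(&lb, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
          MPI_Recv(&ub, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
          MPI_Recv(&A[lb][0], (ub - lb) * NROWS, MPI_DOUBLE, 0, 0,
                   MPI_COMM_WORLD, MPI_STATUS_IGNORE);
      }

      /* Scatter vector B: (double) from Worker[0] to Worker[1..N] */
      MPI_Bcast(B, NROWS, MPI_DOUBLE, 0, MPI_COMM_WORLD);

      /* --- perform the local rows-times-B multiplication here --- */

      /* Gather: (double) from Worker[1..N] to Worker[0] */
      if (rank == 0) {
          for (int i = 1; i < size; i++)
              MPI_Recv(partial, NROWS, MPI_DOUBLE, i, 0, MPI_COMM_WORLD,
                       MPI_STATUS_IGNORE);
      } else {
          MPI_Send(partial, NROWS, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD);
      }
      MPI_Finalize();
      return 0;
  }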
global protocol SparseMatVec(role PE[0..N]) {
  /* Distribute data */
  (int) from PE[0] to PE[1..N];    // row_ptr
  (int) from PE[0] to PE[1..N];    // col_ind
  (double) from PE[0] to PE[1..N]; // vals
  (double) from PE[0] to PE[1..N]; // vector
  /* Output vector */
  (double) from PE[1..N] to PE[0];
}
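The MPI counterpart follows the same scatter/compute/gather shape as DenseMatVec; the sketch below assumes, for simplicity, that PE[0] broadcasts the whole CSR structure (row_ptr, col_ind, vals) and the input vector, with the sizes N_ROWS and N_NZ picked only for illustration.

  /* Rough MPI counterpart of SparseMatVec (illustrative only). */
  #include <mpi.h>

  #define N_ROWS 8
  #define N_NZ   16

  int main(int argc, char **argv) {
      int rank, size;
      MPI_Init(&argc, &argv);
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      MPI_Comm_size(MPI_COMM_WORLD, &size);

      int row_ptr[N_ROWS + 1] = {0}, col_ind[N_NZ] = {0};
      double vals[N_NZ] = {0}, vec[N_ROWS] = {0}, out[N_ROWS] = {0};

      /* Distribute data: one multicast per CSR array, rooted at PE[0] */
      MPI_Bcast(row_ptr, N_ROWS + 1, MPI_INT, 0, MPI_COMM_WORLD);
      MPI_Bcast(col_ind, N_NZ, MPI_INT, 0, MPI_COMM_WORLD);
      MPI_Bcast(vals, N_NZ, MPI_DOUBLE, 0, MPI_COMM_WORLD);
      MPI_Bcast(vec, N_ROWS, MPI_DOUBLE, 0, MPI_COMM_WORLD);

      /* ... each PE multiplies its share of rows ... */

      /* Output vector: (double) from PE[1..N] to PE[0] */
      if (rank == 0) {
          for (int i = 1; i < size; i++)
              MPI_Recv(out, N_ROWS, MPI_DOUBLE, i, 0, MPI_COMM_WORLD,
                       MPI_STATUS_IGNORE);
      } else {
          MPI_Send(out, N_ROWS, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD);
      }
      MPI_Finalize();
      return 0;
  }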
const N = 3;
global protocol FFT(role W[0..7]) {
  foreach (r:0..N-1) {
    foreach (i:0..(1<<N)-1) {
      Label(int) from W[i] to W[i - ((i/(1<<r))%2)*(1<<(r+1)) + (1<<r)];
    }
  }
}
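The index expression above is the standard butterfly partner: at round r, process i exchanges with i XOR 2^r. A minimal MPI sketch of that exchange, assuming exactly 2^N processes and an integer payload standing in for the Label message, is:

  /* Sketch of the FFT butterfly exchange (payload and twiddle maths omitted). */
  #include <mpi.h>

  #define N 3   /* matches const N = 3, i.e. 8 processes */

  int main(int argc, char **argv) {
      int rank, size;
      MPI_Init(&argc, &argv);
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      MPI_Comm_size(MPI_COMM_WORLD, &size);
      if (size != (1 << N)) { MPI_Finalize(); return 1; }

      int label = rank;   /* placeholder for Label(int) */
      for (int r = 0; r < N; r++) {
          int partner = rank ^ (1 << r);  /* i - ((i/2^r)%2)*2^(r+1) + 2^r */
          int received;
          MPI_Sendrecv(&label, 1, MPI_INT, partner, 0,
                       &received, 1, MPI_INT, partner, 0,
                       MPI_COMM_WORLD, MPI_STATUS_IGNORE);
          /* combine 'received' with the local value here */
      }
      MPI_Finalize();
      return 0;
  }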
global protocol Solver(role W[1..N][1..N], group Col = {W[1..N][1]}) {
  rec CONVERGE {
    Ring(double) from W[i:1..N][j:1..N-1] to W[i][j+1];
    Ring(double) from W[i:1..N][N] to W[i][1];
    // Vertical propagation
    (double) from Col to Col;
    continue CONVERGE;
  }
}
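One way such a protocol maps onto MPI is sketched below: the N x N roles become ranks of an N x N grid, the row ring is a send/receive with wrap-around, and the Col group becomes a sub-communicator for the first column. The grid size, the iteration bound and the use of MPI_Allgather for the vertical propagation are assumptions of this sketch rather than details fixed by the protocol.

  /* Hypothetical MPI arrangement for Solver (illustrative only). */
  #include <mpi.h>

  #define N 4   /* assumed grid dimension; needs N*N processes */

  int main(int argc, char **argv) {
      int rank, size;
      MPI_Init(&argc, &argv);
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      MPI_Comm_size(MPI_COMM_WORLD, &size);
      if (size != N * N) { MPI_Finalize(); return 1; }

      int row = rank / N, col = rank % N;
      int right = row * N + (col + 1) % N;      /* W[i][j+1], W[i][N] -> W[i][1] */
      int left  = row * N + (col - 1 + N) % N;

      /* Communicator for the Col group {W[1..N][1]}: first column only */
      MPI_Comm col_comm;
      MPI_Comm_split(MPI_COMM_WORLD, col == 0 ? 0 : MPI_UNDEFINED, row,
                     &col_comm);

      double val = rank, ring_in, col_vals[N];
      for (int iter = 0; iter < 10; iter++) {   /* rec CONVERGE */
          /* Ring(double) along each row */
          MPI_Sendrecv(&val, 1, MPI_DOUBLE, right, 0,
                       &ring_in, 1, MPI_DOUBLE, left, 0,
                       MPI_COMM_WORLD, MPI_STATUS_IGNORE);
          /* Vertical propagation: (double) from Col to Col */
          if (col == 0)
              MPI_Allgather(&val, 1, MPI_DOUBLE, col_vals, 1, MPI_DOUBLE,
                            col_comm);
          /* update val from ring_in / col_vals and test convergence here */
      }
      if (col == 0) MPI_Comm_free(&col_comm);
      MPI_Finalize();
      return 0;
  }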
global protocol MonteCarloPi(role Worker[0..N]) {
  // Calculation per process
  Count(int) from Worker[0] to Worker[1..N];
  Result(double) from Worker[1..N] to Worker[0];
  // Final result calculated at Worker[0]
}
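A compact MPI rendering of this protocol, assuming the usual collective operations (a broadcast of the sample count and a reduction of the per-process estimates, with every rank contributing in this sketch), might look like:

  /* Monte Carlo pi estimate with the MonteCarloPi shape (illustrative only). */
  #include <mpi.h>
  #include <stdio.h>
  #include <stdlib.h>

  int main(int argc, char **argv) {
      int rank, size, count = 1000000;   /* assumed sample count */
      MPI_Init(&argc, &argv);
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      MPI_Comm_size(MPI_COMM_WORLD, &size);

      /* Count(int) from Worker[0] to Worker[1..N] */
      MPI_Bcast(&count, 1, MPI_INT, 0, MPI_COMM_WORLD);

      /* Calculation per process: count points falling inside the unit circle */
      srand(rank + 1);
      long hits = 0;
      for (int i = 0; i < count; i++) {
          double x = rand() / (double)RAND_MAX, y = rand() / (double)RAND_MAX;
          if (x * x + y * y <= 1.0) hits++;
      }
      double local = 4.0 * hits / count, pi = 0.0;

      /* Result(double) from Worker[1..N] to Worker[0], combined at the root */
      MPI_Reduce(&local, &pi, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
      if (rank == 0) printf("pi ~= %f\n", pi / size);
      MPI_Finalize();
      return 0;
  }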