N
Natsu Mizutani
Hello,
I'm trying to wrap a c++ library using MPI inside with boost.python
(or SWIG).
I managed to find that calling `MPI::Init()` embedded in a c++ function
would not work. So, I decided to use `pyMPI`.
To avoid overhead of pickling and unpickling while calling `mpi.send`
in Python, I'd rather call c++ functions `MPI::Isend()` etc. embedded
in a c++ function.
What I'd like to know is the following:
After starting a `pyMPI` session, it seems to work to call c++ functions
`MPI::Isend()` or `MPI::Irecv()`, etc. from the `pyMPI`,
at least for my small test code attached below.
Is it guaranteed to work for larger scale code?
I mean for larger memory allocation, heavily repeated message passing,
and many parallel processes.
Or, the example worked only with some luck?
As for now, `mpiCC` is built without shared library.
Any suggestions?
Thank you.
--Natsu
----- A small test code mpitest.so works like this -----
$ mpirun -np 2 /usr/bin/pyMPI
Python 2.2.1 (#1, Jan 22 2003, 19:07:20)
[GCC 2.95.3 20010315 (release)] on linux2
Copyright (c) 2001, 2002 Python Software Foundation.
All Rights Reserved.
Copyright (c) 2000 BeOpen.com.
All Rights Reserved.
Copyright (c) 1995-2001 Corporation for National Research Initiatives.
All Rights Reserved.
Copyright (c) 1991-1995 Stichting Mathematisch Centrum, Amsterdam.
All Rights Reserved.hello 0 2
hello 1 2100, 99, 98, 97, 96, 95, was a rtmp for node:0
100, 101, 102, 103, 104, 105, was a rtmp for node:1>>>
----- mpitest.so is made from the following using Boost.Python.
//ftest.cpp
#include "ftest.h"
//ftest.h
#include <iostream>
#include <mpi++.h>
// Test class for calling MPI from pyMPI: exchanges an n-element array
// of doubles with a partner rank using non-blocking MPI calls.
class bndry{
  MPI::Request sreq_, rreq_;  // outstanding non-blocking send/receive requests
  double* stmp_;              // send buffer, n_ doubles
  double* rtmp_;              // receive buffer, n_ doubles
public:
  int n_;                     // number of doubles exchanged
  // Allocates the send and receive buffers for n doubles.
  bndry(int n) : n_(n) {
    stmp_ = new double[n];
    rtmp_ = new double[n];
  }
  // Deep copy. The Python wrapper exposes copy construction
  // (init<const bndry&>()), and the implicit copy would share the raw
  // buffers and double-delete them in ~bndry (Rule of Three).
  bndry(const bndry& o) : n_(o.n_) {
    stmp_ = new double[n_];
    rtmp_ = new double[n_];
    for (int k = 0; k < n_; ++k) {
      stmp_[k] = o.stmp_[k];
      rtmp_[k] = o.rtmp_[k];
    }
  }
  bndry& operator=(const bndry& o) {
    if (this != &o) {
      double* s = new double[o.n_];
      double* r = new double[o.n_];
      for (int k = 0; k < o.n_; ++k) {
        s[k] = o.stmp_[k];
        r[k] = o.rtmp_[k];
      }
      delete [] stmp_;
      delete [] rtmp_;
      stmp_ = s;
      rtmp_ = r;
      n_ = o.n_;
    }
    return *this;
  }
  ~bndry(){
    delete [] stmp_;
    delete [] rtmp_;
  }
  // Fills the send buffer with rank-dependent values, then performs a
  // non-blocking exchange with rank `other` and prints what arrived.
  // `me` is this process's rank; the rank is also used as the tag.
  void mpiTest(int me, int other) {
    double c0 = 1.0;
    if (me == 1) c0 = -1.0;  // make each rank's data distinct
    unsigned int ii = 0;
    // Create different arrays in different nodes
    for (int k = 0; k < n_; ++k) {
      stmp_[ii++] = 100.0 + c0 * k;
    }
    // Fixed: the posted code had the garbled datatype `MPI:OUBLE`;
    // the MPI C++ binding spells it MPI::DOUBLE.
    sreq_ = MPI::COMM_WORLD.Isend(stmp_, ii, MPI::DOUBLE, other, me);
    rreq_ = MPI::COMM_WORLD.Irecv(rtmp_, ii, MPI::DOUBLE, other, other);
    rreq_.Wait();  // block until the incoming data is complete
    for (int k = 0; k < n_; ++k) {
      std::cout << rtmp_[k] << ", ";
    }
    std::cout << " was a rtmp for node:" << me << std::endl;
    sreq_.Wait();  // ensure the send buffer is safe to reuse
  }
};
//mpitest.cpp
#include <boost/python.hpp>
#include <boost/cstdint.hpp>
// NOTE(review): earlier in the post the header is called "ftest.h" —
// confirm <fblock.h> is the intended name.
#include <fblock.h>
// Fixed: the posted code had the garbled `using namespace boost:ython;`.
using namespace boost::python;

// Exposes bndry to Python as extension module `mpitest`:
// copy ctor, int ctor, the n_ attribute, and the mpiTest method.
BOOST_PYTHON_MODULE(mpitest) {
    class_< bndry >("bndry", init< const bndry& >())
        .def(init< int >())
        .def_readwrite("n_", &bndry::n_)
        .def("mpiTest", &bndry::mpiTest)
    ;
}
I'm trying to wrap a c++ library using MPI inside with boost.python
(or SWIG).
I managed to find that calling `MPI::Init()` embedded in a c++ function
would not work. So, I decided to use `pyMPI`.
To avoid overhead of pickling and unpickling while calling `mpi.send`
in Python, I'd rather call c++ functions `MPI::Isend()` etc. embedded
in a c++ function.
What I'd like to know is the following:
After starting a `pyMPI` session, it seems to work to call c++ functions
`MPI::Isend()` or `MPI::Irecv()`, etc. from the `pyMPI`,
at least for my small test code attached below.
Is it guaranteed to work for larger scale code?
I mean for larger memory allocation, heavily repeated message passing,
and many parallel processes.
Or, the example worked only with some luck?
As for now, `mpiCC` is built without shared library.
Any suggestions?
Thank you.
--Natsu
----- A small test code mpitest.so works like this -----
$ mpirun -np 2 /usr/bin/pyMPI
Python 2.2.1 (#1, Jan 22 2003, 19:07:20)
[GCC 2.95.3 20010315 (release)] on linux2
Copyright (c) 2001, 2002 Python Software Foundation.
All Rights Reserved.
Copyright (c) 2000 BeOpen.com.
All Rights Reserved.
Copyright (c) 1995-2001 Corporation for National Research Initiatives.
All Rights Reserved.
Copyright (c) 1991-1995 Stichting Mathematisch Centrum, Amsterdam.
All Rights Reserved.hello 0 2
hello 1 2100, 99, 98, 97, 96, 95, was a rtmp for node:0
100, 101, 102, 103, 104, 105, was a rtmp for node:1>>>
----- mpitest.so is made from the following using Boost.Python.
//ftest.cpp
#include "ftest.h"
//ftest.h
#include <iostream>
#include <mpi++.h>
// Test class for calling MPI from pyMPI: exchanges an n-element array
// of doubles with a partner rank using non-blocking MPI calls.
class bndry{
  MPI::Request sreq_, rreq_;  // outstanding non-blocking send/receive requests
  double* stmp_;              // send buffer, n_ doubles
  double* rtmp_;              // receive buffer, n_ doubles
public:
  int n_;                     // number of doubles exchanged
  // Allocates the send and receive buffers for n doubles.
  bndry(int n) : n_(n) {
    stmp_ = new double[n];
    rtmp_ = new double[n];
  }
  // Deep copy. The Python wrapper exposes copy construction
  // (init<const bndry&>()), and the implicit copy would share the raw
  // buffers and double-delete them in ~bndry (Rule of Three).
  bndry(const bndry& o) : n_(o.n_) {
    stmp_ = new double[n_];
    rtmp_ = new double[n_];
    for (int k = 0; k < n_; ++k) {
      stmp_[k] = o.stmp_[k];
      rtmp_[k] = o.rtmp_[k];
    }
  }
  bndry& operator=(const bndry& o) {
    if (this != &o) {
      double* s = new double[o.n_];
      double* r = new double[o.n_];
      for (int k = 0; k < o.n_; ++k) {
        s[k] = o.stmp_[k];
        r[k] = o.rtmp_[k];
      }
      delete [] stmp_;
      delete [] rtmp_;
      stmp_ = s;
      rtmp_ = r;
      n_ = o.n_;
    }
    return *this;
  }
  ~bndry(){
    delete [] stmp_;
    delete [] rtmp_;
  }
  // Fills the send buffer with rank-dependent values, then performs a
  // non-blocking exchange with rank `other` and prints what arrived.
  // `me` is this process's rank; the rank is also used as the tag.
  void mpiTest(int me, int other) {
    double c0 = 1.0;
    if (me == 1) c0 = -1.0;  // make each rank's data distinct
    unsigned int ii = 0;
    // Create different arrays in different nodes
    for (int k = 0; k < n_; ++k) {
      stmp_[ii++] = 100.0 + c0 * k;
    }
    // Fixed: the posted code had the garbled datatype `MPI:OUBLE`;
    // the MPI C++ binding spells it MPI::DOUBLE.
    sreq_ = MPI::COMM_WORLD.Isend(stmp_, ii, MPI::DOUBLE, other, me);
    rreq_ = MPI::COMM_WORLD.Irecv(rtmp_, ii, MPI::DOUBLE, other, other);
    rreq_.Wait();  // block until the incoming data is complete
    for (int k = 0; k < n_; ++k) {
      std::cout << rtmp_[k] << ", ";
    }
    std::cout << " was a rtmp for node:" << me << std::endl;
    sreq_.Wait();  // ensure the send buffer is safe to reuse
  }
};
//mpitest.cpp
#include <boost/python.hpp>
#include <boost/cstdint.hpp>
// NOTE(review): earlier in the post the header is called "ftest.h" —
// confirm <fblock.h> is the intended name.
#include <fblock.h>
// Fixed: the posted code had the garbled `using namespace boost:ython;`.
using namespace boost::python;

// Exposes bndry to Python as extension module `mpitest`:
// copy ctor, int ctor, the n_ attribute, and the mpiTest method.
BOOST_PYTHON_MODULE(mpitest) {
    class_< bndry >("bndry", init< const bndry& >())
        .def(init< int >())
        .def_readwrite("n_", &bndry::n_)
        .def("mpiTest", &bndry::mpiTest)
    ;
}