ruby-mpi-0.3.2/0000755000004100000410000000000013327342425013315 5ustar www-datawww-dataruby-mpi-0.3.2/.travis.yml0000644000004100000410000000111713327342425015426 0ustar www-datawww-datalanguage: ruby os: - linux - osx rvm: - 2.0.0 - 1.9.3 cache: - bundler - apt addons: apt: packages: - libopenmpi-dev - openmpi-bin before_install: - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew update; fi - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew install open-mpi; fi before_script: - bundle exec rake compile env: - JRUBY_OPTS=-Xcext.enabled=true script: - bundle exec mpirun -n 2 rspec - bundle exec mpirun -n 2 ruby -Ilib samples/hello.rb - bundle exec mpirun -n 2 ruby -Ilib samples/narray.rb - bundle exec mpirun -n 2 ruby -Ilib samples/narray_offset.rb ruby-mpi-0.3.2/test/0000755000004100000410000000000013327342425014274 5ustar www-datawww-dataruby-mpi-0.3.2/test/test_utils.rb0000644000004100000410000000054513327342425017024 0ustar www-datawww-datarequire "test/unit" $:.unshift File.join(File.dirname(__FILE__), "..", "lib") require "mpi/utils" class TestMPIUtils < Test::Unit::TestCase def setup end def teardown end def test_task_divide [[4,1], [6,3], [7,3], [15,4], [3, 7]].each do |m, size| ary = MPI.task_divide(m, size) assert ary.max-ary.min <= 1 end end end ruby-mpi-0.3.2/.rspec0000644000004100000410000000001013327342425014421 0ustar www-datawww-data--color ruby-mpi-0.3.2/spec/0000755000004100000410000000000013327342425014247 5ustar www-datawww-dataruby-mpi-0.3.2/spec/ruby-mpi_spec.rb0000644000004100000410000001236713327342425017363 0ustar www-datawww-datarequire File.expand_path(File.dirname(__FILE__) + '/spec_helper') if defined?(NumRu::NArray) include NumRu end describe "MPI" do before(:all) do MPI.Init() end after(:all) do MPI.Finalize() end before do @world = MPI::Comm::WORLD end it "should give version" do expect(MPI::VERSION.class).to eq(Fixnum) expect(MPI::SUBVERSION.class).to eq(Fixnum) end it "should give rank and size" do expect(@world.rank.class).to eql(Fixnum) expect(@world.size.class).to eql(Fixnum) expect(@world.size).to be > 0 end it "should send and receive String" do rank = @world.rank message = "Hello from #{rank}" tag = 0 @world.Send(message, 0, tag) if rank != 0 if rank == 0 (@world.size-1).times do |i| str = " "*"Hello from #{i+1}".length status = @world.Recv(str, i+1, tag) expect(status.source).to eql(i+1) expect(status.tag).to eql(tag) expect(str).to match /\AHello from #{i+1}/ end end end it "should send and receive NArray" do tag = 10 rank = @world.rank [NArray[1,2,3], NArray[3.0,2.0,1.0]].each_with_index do |ary0,j| @world.Send(ary0, 0, tag+j) if rank != 0 if rank == 0 (@world.size-1).times do |i| ary1 = NArray.new(ary0.typecode, ary0.total) status = @world.Recv(ary1, i+1, tag+j) expect(status.source).to eql(i+1) expect(status.tag).to eql(tag+j) expect(ary1).to be == ary0 end end end end it "should send and receive without blocking" do tag = 20 rank = @world.rank message = "Hello from #{rank}" if rank != 0 request = @world.Isend(message, 0, tag) status = request.Wait end if rank == 0 (@world.size-1).times do |i| str = " "*"Hello from #{i+1}".length request_recv = @world.Irecv(str, i+1, tag) status = request_recv.Wait expect(status.source).to eql(i+1) expect(status.tag).to eql(tag) expect(str).to match(/\AHello from #{i+1}/) end end end it "should gather data" do rank = @world.rank size = @world.size root = 0 bufsize = 2 sendbuf = rank.to_s*bufsize recvbuf = rank == root ? 
"?"*bufsize*size : nil @world.Gather(sendbuf, recvbuf, root) if rank == root str = "" size.times{|i| str << i.to_s*bufsize} expect(recvbuf).to eql(str) end end it "should gather data to all processes (allgather)" do rank = @world.rank size = @world.size bufsize = 2 sendbuf = rank.to_s*bufsize recvbuf = "?"*bufsize*size @world.Allgather(sendbuf, recvbuf) str = "" size.times{|i| str << i.to_s*bufsize} expect(recvbuf).to eql(str) end it "should broad cast data (bcast)" do rank = @world.rank root = 0 bufsize = 2 if rank == root buffer = rank.to_s*bufsize else buffer = " "*bufsize end @world.Bcast(buffer, root) expect(buffer).to eql(root.to_s*bufsize) end it "should scatter data" do rank = @world.rank size = @world.size root = 0 bufsize = 2 if rank == root sendbuf = "" size.times{|i| sendbuf << i.to_s*bufsize} else sendbuf = nil end recvbuf = " "*bufsize @world.Scatter(sendbuf, recvbuf, root) expect(recvbuf).to eql(rank.to_s*bufsize) end it "should send and recv data (sendrecv)" do rank = @world.rank size = @world.size dest = rank-1 dest = size-1 if dest < 0 #dest = MPI::PROC_NULL if dest < 0 source = rank+1 source = 0 if source > size-1 #source = MPI::PROC_NULL if source > size-1 sendtag = 30 + rank recvtag = 30 + source bufsize = 2 sendbuf = rank.to_s*bufsize recvbuf = " "*bufsize @world.Sendrecv(sendbuf, dest, sendtag, recvbuf, source, recvtag); if source != MPI::PROC_NULL expect(recvbuf).to eql(source.to_s*bufsize) end end it "should change data between each others (alltoall)" do rank = @world.rank size = @world.size bufsize = 2 sendbuf = rank.to_s*bufsize*size recvbuf = "?"*bufsize*size @world.Alltoall(sendbuf, recvbuf) str = "" size.times{|i| str << i.to_s*bufsize} expect(recvbuf).to eql(str) end it "should reduce data" do rank = @world.rank size = @world.size root = 0 bufsize = 2 sendbuf = NArray.to_na([rank]*bufsize) recvbuf = rank == root ? NArray.new(sendbuf.typecode,bufsize) : nil @world.Reduce(sendbuf, recvbuf, MPI::Op::SUM, root) if rank == root ary = NArray.new(sendbuf.typecode,bufsize).fill(size*(size-1)/2.0) expect(recvbuf).to be == ary end end it "should reduce data and send to all processes (allreduce)" do rank = @world.rank size = @world.size bufsize = 2 sendbuf = NArray.to_na([rank]*bufsize) recvbuf = NArray.new(sendbuf.typecode,bufsize) @world.Allreduce(sendbuf, recvbuf, MPI::Op::SUM) ary = NArray.new(sendbuf.typecode,bufsize).fill(size*(size-1)/2.0) expect(recvbuf).to be == ary end it "should not raise exception in calling barrier" do @world.Barrier end it "shoud raise exeption" do expect { @world.Send("", @world.size+1, 0) }.to raise_error(MPI::ERR::RANK) expect(@world.Errhandler).to eql(MPI::Errhandler::ERRORS_RETURN) end end ruby-mpi-0.3.2/spec/spec_helper.rb0000644000004100000410000000065313327342425017071 0ustar www-datawww-data$LOAD_PATH.unshift(File.join(File.dirname(__FILE__), '..', 'ext', 'mpi')) $LOAD_PATH.unshift(File.join(File.dirname(__FILE__), '..', 'lib')) $LOAD_PATH.unshift(File.dirname(__FILE__)) require 'rspec' require 'mpi' # Requires supporting files with custom matchers and macros, etc, # in ./support/ and its subdirectories. 
Dir["#{File.dirname(__FILE__)}/support/**/*.rb"].each {|f| require f} RSpec.configure do |config| end ruby-mpi-0.3.2/.document0000644000004100000410000000006713327342425015137 0ustar www-datawww-datalib/**/*.rb bin/* - features/**/*.feature LICENSE.txt ruby-mpi-0.3.2/Rakefile0000644000004100000410000000376013327342425014770 0ustar www-datawww-datarequire 'rubygems' require 'bundler' require "rake/clean" begin Bundler.setup(:default, :development) rescue Bundler::BundlerError => e $stderr.puts e.message $stderr.puts "Run `bundle install` to install missing gems" exit e.status_code end require 'rake' require 'jeweler' Jeweler::Tasks.new do |gem| # gem is a Gem::Specification... see http://docs.rubygems.org/read/chapter/20 for more options gem.name = "ruby-mpi" gem.homepage = "http://github.com/seiya/ruby-mpi" gem.license = "MIT" gem.summary = "A ruby binding of MPI" gem.description = "A ruby binding of Message Passing Interface (MPI), which is an API specification that allows processes to communicate with one another by sending and receiving messages." gem.email = "seiya@gfd-dennou.org" gem.authors = ["Seiya Nishizawa"] # Include your dependencies below. Runtime dependencies are required when using your gem, # and development dependencies are only needed for development (ie running rake tasks, tests, etc) # gem.add_runtime_dependency 'jabber4r', '> 0.1' # gem.add_development_dependency 'rspec', '> 1.2.3' end Jeweler::RubygemsDotOrgTasks.new require 'rspec/core' require 'rspec/core/rake_task' RSpec::Core::RakeTask.new(:spec) do |spec| spec.pattern = FileList['spec/**/*_spec.rb'] end RSpec::Core::RakeTask.new(:rcov) do |spec| spec.pattern = 'spec/**/*_spec.rb' spec.rcov = true end task :default => :spec require 'rdoc/task' Rake::RDocTask.new do |rdoc| version = File.exist?('VERSION') ? File.read('VERSION') : "" rdoc.rdoc_dir = 'rdoc' rdoc.title = "ruby-mpi #{version}" rdoc.rdoc_files.include('README*') rdoc.rdoc_files.include('lib/**/*.rb') rdoc.rdoc_files.include('samples/*.rb') end if RUBY_PLATFORM =~ /java/ require "rake/javaextensiontask" Rake::JavaExtensionTask.new("mpi") else require "rake/extensiontask" Rake::ExtensionTask.new("mpi") end CLEAN.include("ext/mpi/*.o") CLEAN.include("ext/mpi/mkmf.log") CLOBBER.include("ext/mpi/mpi.so") CLOBBER.include("ext/mpi/Makefile") ruby-mpi-0.3.2/lib/0000755000004100000410000000000013327342425014063 5ustar www-datawww-dataruby-mpi-0.3.2/lib/mpi.rb0000644000004100000410000000042013327342425015171 0ustar www-datawww-databegin require "rubygems" rescue LoadError end begin require "numru/narray" rescue LoadError err = $! begin require "narray" rescue LoadError STDERR.puts "You should install numru-narray or narray to use ruby-mpi" raise err end end require "mpi.so" ruby-mpi-0.3.2/lib/mpi/0000755000004100000410000000000013327342425014650 5ustar www-datawww-dataruby-mpi-0.3.2/lib/mpi/utils.rb0000644000004100000410000000041713327342425016337 0ustar www-datawww-datamodule MPI module_function def task_divide(m, size) dm = m.to_f/size ary = Array.new(size) ary[0] = dm.round sum = ary[0] (size-1).times do|i| ary[i+1] = (dm*(i+2)).round - sum sum += ary[i+1] end ary end end # module MPI ruby-mpi-0.3.2/README.rdoc0000644000004100000410000000233213327342425015123 0ustar www-datawww-data= Ruby-MPI {Build Status}[https://travis-ci.org/seiya/ruby-mpi] Ruby-MPI is a ruby binding of Message Passing Interface (MPI), which is an API specification that allows processes to communicate with one another by sending and receiving messages. 
== Install # gem install ruby-mpi == How to run Use mpirun or mpiexec to run a script e.g. (run with 4 processes) # mpirun -np 4 ruby hello.rb == Contributing to Ruby-MPI * Check out the latest master to make sure the feature hasn't been implemented or the bug hasn't been fixed yet * Check out the issue tracker to make sure someone already hasn't requested it and/or contributed it * Fork the project * Start a feature/bugfix branch * Commit and push until you are happy with your contribution * Make sure to add tests for it. This is important so I don't break it in a future version unintentionally. * Please try not to mess with the Rakefile, version, or history. If you want to have your own version, or is otherwise necessary, that is fine, but please isolate to its own commit so I can cherry-pick around it. == Copyright Copyright (c) 2011-2016 Seiya Nishizawa. See LICENSE.txt for further details. ruby-mpi-0.3.2/Gemfile0000644000004100000410000000065413327342425014615 0ustar www-datawww-datasource "http://rubygems.org" # Add dependencies required to use your gem here. # Example: # gem "activesupport", ">= 2.3.5" gem "numru-narray", "~> 1.0" # Add dependencies to develop your gem here. # Include everything needed to run rake, tests, features, etc. group :development do gem "rspec", ">= 2.3.0" gem "bundler", ">= 1.0.0" gem "jeweler", ">= 1.5.2" gem "simplecov", ">= 0" gem "rake-compiler", ">= 0" end ruby-mpi-0.3.2/samples/0000755000004100000410000000000013327342425014761 5ustar www-datawww-dataruby-mpi-0.3.2/samples/narray_offset.rb0000644000004100000410000000066213327342425020154 0ustar www-datawww-datarequire "mpi" if defined?(NumRu::NArray) include NumRu end MPI.Init world = MPI::Comm::WORLD if world.size == 1 print "Size is one, so do nothing\n" exit end rank = world.rank size = world.size length = 2 if rank == 0 a = NArray.float(length,size-1) (size-1).times do |i| world.Recv(a, i+1, 1, length, i*length) end p a else a = NArray.float(length).indgen + rank*10 world.Send(a, 0, 1) end MPI.Finalize ruby-mpi-0.3.2/samples/hello.rb0000644000004100000410000000053613327342425016415 0ustar www-datawww-datarequire "mpi" MPI.Init world = MPI::Comm::WORLD if world.size == 1 print "Size is one, so do nothing\n" exit end rank = world.rank if rank == 0 (world.size-1).times do |i| str ="\x00"*100 world.Recv(str, i+1, 0) p str.gsub(/\000/,"") end else message = "Hello from #{rank}" world.Send(message, 0, 0) end MPI.Finalize ruby-mpi-0.3.2/samples/narray.rb0000644000004100000410000000054113327342425016602 0ustar www-datawww-datarequire "mpi" if defined?(NumRu::NArray) include NumRu end MPI.Init world = MPI::Comm::WORLD if world.size == 1 print "Size is one, so do nothing\n" exit end rank = world.rank if rank == 0 (world.size-1).times do |i| a = NArray.float(2) world.Recv(a, i+1, 1) p a end else world.Send(NArray[1.0,2], 0, 1) end MPI.Finalize ruby-mpi-0.3.2/ext/0000755000004100000410000000000013327342425014115 5ustar www-datawww-dataruby-mpi-0.3.2/ext/mpi/0000755000004100000410000000000013327342425014702 5ustar www-datawww-dataruby-mpi-0.3.2/ext/mpi/mpi.c0000644000004100000410000006123313327342425015640 0ustar www-datawww-data#include #include #include #include "ruby.h" #include "narray.h" #include "mpi.h" #if SIZEOF_SHORT == 2 # define NA_MPI_SINT MPI_SHORT #else ---->> Please define NA_MPI_SINT manually because sizeof(short) != 2. 
<<---- #endif #if SIZEOF_LONG == 4 # define NA_MPI_LINT MPI_LONG #else # if SIZEOF_INT == 4 # define NA_MPI_LINT MPI_INT # else ---->> Please define NA_MPI_LINT manually because sizeof(long) != 4. <<---- # endif #endif #if SIZEOF_LONG_LONG == 8 # define NA_MPI_LLINT MPI_LONG_LONG #else ---->> Please define NA_MPI_LLINT manually because sizeof(long long) != 8. <<---- #endif #define OBJ2C(rb_obj, len, buffer, typ, off) \ {\ if (TYPE(rb_obj) == T_STRING) {\ if (len==0) len = RSTRING_LEN(rb_obj);\ buffer = (void*)(StringValuePtr(rb_obj) + off);\ typ = MPI_BYTE;\ } else if (IsNArray(rb_obj)) {\ struct NARRAY *a;\ GetNArray(rb_obj, a);\ buffer = (void*)(a->ptr);\ if (len==0) len = a->total;\ switch (a->type) {\ case NA_BYTE:\ typ = MPI_BYTE;\ buffer = (void*)((char*)buffer + off);\ break;\ case NA_SINT:\ typ = NA_MPI_SINT;\ buffer = (void*)((char*)buffer + off*2);\ break;\ case NA_LINT:\ typ = NA_MPI_LINT;\ buffer = (void*)((char*)buffer + off*4);\ break;\ case NA_LLINT:\ typ = NA_MPI_LLINT;\ buffer = (void*)((char*)buffer + off*8);\ break;\ case NA_SFLOAT:\ typ = MPI_FLOAT;\ buffer = (void*)((char*)buffer + off*4);\ break;\ case NA_DFLOAT:\ typ = MPI_DOUBLE;\ buffer = (void*)((char*)buffer + off*8);\ break;\ case NA_SCOMPLEX:\ typ = MPI_2COMPLEX;\ buffer = (void*)((char*)buffer + off*8);\ break;\ case NA_DCOMPLEX:\ typ = MPI_2DOUBLE_COMPLEX;\ buffer = (void*)((char*)buffer + off*16);\ break;\ default:\ rb_raise(rb_eArgError, "narray type is invalid");\ }\ } else {\ rb_raise(rb_eArgError, "Only String and NArray are supported");\ }\ } static VALUE mMPI; static VALUE cComm, cRequest, cOp, cErrhandler, cStatus; static VALUE eBUFFER, eCOUNT, eTYPE, eTAG, eCOMM, eRANK, eREQUEST, eROOT, eGROUP, eOP, eTOPOLOGY, eDIMS, eARG, eUNKNOWN, eTRUNCATE, eOTHER, eINTERN, eIN_STATUS, ePENDING, eACCESS, eAMODE, eASSERT, eBAD_FILE, eBASE, eCONVERSION, eDISP, eDUP_DATAREP, eFILE_EXISTS, eFILE_IN_USE, eFILE, eINFO_KEY, eINFO_NOKEY, eINFO_VALUE, eINFO, eIO, eKEYVAL, eLOCKTYPE, eNAME, eNO_MEM, eNOT_SAME, eNO_SPACE, eNO_SUCH_FILE, ePORT, eQUOTA, eREAD_ONLY, eRMA_CONFLICT, eRMA_SYNC, eSERVICE, eSIZE, eSPAWN, eUNSUPPORTED_DATAREP, eUNSUPPORTED_OPERATION, eWIN, eLASTCODE, eSYSRESOURCE; struct _Comm { MPI_Comm Comm; bool free; }; struct _Request { MPI_Request Request; bool free; }; struct _Op { MPI_Op Op; bool free; }; struct _Errhandler { MPI_Errhandler Errhandler; bool free; }; static bool _initialized = false; static bool _finalized = false; #define CAE_ERR(type) case MPI_ERR_ ## type: rb_raise(e ## type,"%s",str); break static void check_error(int error) { if (error == MPI_SUCCESS) return; int code, len; char str[MPI_MAX_ERROR_STRING]; if (MPI_Error_class(error, &code)!=MPI_SUCCESS || MPI_Error_string(error, str, &len)!=MPI_SUCCESS) rb_raise(rb_eRuntimeError, "unknown error occuerd in MPI call"); switch (code) { CAE_ERR(BUFFER); CAE_ERR(COUNT); CAE_ERR(TYPE); CAE_ERR(TAG); CAE_ERR(COMM); CAE_ERR(RANK); CAE_ERR(REQUEST); CAE_ERR(ROOT); CAE_ERR(GROUP); CAE_ERR(OP); CAE_ERR(TOPOLOGY); CAE_ERR(DIMS); CAE_ERR(ARG); CAE_ERR(UNKNOWN); CAE_ERR(TRUNCATE); CAE_ERR(OTHER); CAE_ERR(INTERN); CAE_ERR(IN_STATUS); CAE_ERR(PENDING); CAE_ERR(ACCESS); CAE_ERR(AMODE); CAE_ERR(ASSERT); CAE_ERR(BAD_FILE); CAE_ERR(BASE); CAE_ERR(CONVERSION); CAE_ERR(DISP); CAE_ERR(DUP_DATAREP); CAE_ERR(FILE_EXISTS); CAE_ERR(FILE_IN_USE); CAE_ERR(FILE); CAE_ERR(INFO_KEY); CAE_ERR(INFO_NOKEY); CAE_ERR(INFO_VALUE); CAE_ERR(INFO); CAE_ERR(IO); CAE_ERR(KEYVAL); CAE_ERR(LOCKTYPE); CAE_ERR(NAME); CAE_ERR(NO_MEM); CAE_ERR(NOT_SAME); CAE_ERR(NO_SPACE); 
CAE_ERR(NO_SUCH_FILE); CAE_ERR(PORT); CAE_ERR(QUOTA); CAE_ERR(READ_ONLY); CAE_ERR(RMA_CONFLICT); CAE_ERR(RMA_SYNC); CAE_ERR(SERVICE); CAE_ERR(SIZE); CAE_ERR(SPAWN); CAE_ERR(UNSUPPORTED_DATAREP); CAE_ERR(UNSUPPORTED_OPERATION); CAE_ERR(WIN); CAE_ERR(LASTCODE); #ifdef MPI_ERR_SYSRESOURCE CAE_ERR(SYSRESOURCE); #endif default: rb_raise(rb_eRuntimeError, "unknown error: %d", code); } } #define DEF_FREE(name, capit) \ static void \ name ## _free(void *ptr)\ {\ struct _ ## name *obj;\ obj = (struct _ ## name*) ptr;\ if (!_finalized && obj->free && obj->name!=MPI_ ## capit ##_NULL)\ check_error(MPI_ ## name ## _free(&(obj->name))); \ free(obj);\ } #define DEF_FREE2(name, capit) \ static void \ name ## _free2(void *ptr)\ {\ struct _ ## name *obj;\ obj = (struct _ ## name*) ptr;\ free(obj);\ } //DEF_FREE(Comm, COMM) DEF_FREE(Request, REQUEST) //DEF_FREE(Op, OP) DEF_FREE(Errhandler, ERRHANDLER) DEF_FREE2(Comm, COMM) DEF_FREE2(Op, OP) DEF_FREE2(Errhandler, ERRHANDLER) static void Status_free(void *ptr) { free((MPI_Status*) ptr); } #define DEF_CONST(v, const, name) \ {\ v = ALLOC(struct _ ## v);\ v->v = const;\ v->free = false;\ rb_define_const(c ## v, #name, Data_Wrap_Struct(c ## v, NULL, v ## _free2, v)); \ } static void _finalize() { if(_initialized && !_finalized) { _finalized = true; check_error(MPI_Finalize()); } } static VALUE rb_m_init(int argc, VALUE *argv, VALUE self) { VALUE argary; int cargc; char ** cargv; VALUE progname; int i; rb_scan_args(argc, argv, "01", &argary); if (NIL_P(argary)) { argary = rb_const_get(rb_cObject, rb_intern("ARGV")); cargc = RARRAY_LEN(argary); } else { Check_Type(argary, T_ARRAY); cargc = RARRAY_LEN(argary); } cargv = ALLOCA_N(char *, cargc+1); progname = rb_gv_get("$0"); cargv[0] = StringValueCStr(progname); for(i=0; iComm, NUM2INT(rerror)); return INT2NUM(ierror); } // MPI::Comm /* static VALUE rb_comm_alloc(VALUE klass) { struct _Comm *ptr = ALLOC(struct _Comm); return Data_Wrap_Struct(klass, NULL, Comm_free, ptr); } */ static VALUE rb_comm_initialize(VALUE self) { rb_raise(rb_eRuntimeError, "not developed yet"); // MPI_Comm_create() // comm->free = true; return Qnil; } static VALUE rb_comm_size(VALUE self) { struct _Comm *comm; int size; Data_Get_Struct(self, struct _Comm, comm); check_error(MPI_Comm_size(comm->Comm, &size)); return INT2NUM(size); } static VALUE rb_comm_rank(VALUE self) { struct _Comm *comm; int rank; Data_Get_Struct(self, struct _Comm, comm); check_error(MPI_Comm_rank(comm->Comm, &rank)); return INT2NUM(rank); } static VALUE rb_comm_send(VALUE self, VALUE rb_obj, VALUE rb_dest, VALUE rb_tag) { void* buffer; int len=0, dest, tag; MPI_Datatype type; struct _Comm *comm; OBJ2C(rb_obj, len, buffer, type, 0); dest = NUM2INT(rb_dest); tag = NUM2INT(rb_tag); Data_Get_Struct(self, struct _Comm, comm); check_error(MPI_Send(buffer, len, type, dest, tag, comm->Comm)); return Qnil; } static VALUE rb_comm_isend(VALUE self, VALUE rb_obj, VALUE rb_dest, VALUE rb_tag) { void* buffer; int len=0, dest, tag; MPI_Datatype type; struct _Comm *comm; struct _Request *request; VALUE rb_request; OBJ2C(rb_obj, len, buffer, type, 0); dest = NUM2INT(rb_dest); tag = NUM2INT(rb_tag); Data_Get_Struct(self, struct _Comm, comm); rb_request = Data_Make_Struct(cRequest, struct _Request, NULL, Request_free, request); request->free = true; check_error(MPI_Isend(buffer, len, type, dest, tag, comm->Comm, &(request->Request))); return rb_request; } static VALUE rb_comm_recv(int argc, VALUE *argv, VALUE self) { VALUE rb_obj, rb_source, rb_tag; VALUE rb_len, rb_offset; // option 
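  /* rb_len and rb_offset are optional trailing arguments:
   * Recv(buf, source, tag[, len[, offset]]).  When given, len overrides the
   * receive count taken from buf, and offset shifts the start position within
   * buf (OBJ2C converts it to a byte offset based on the element type);
   * samples/narray_offset.rb passes both. */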
void* buffer; int source, tag, len = 0, offset = 0; MPI_Datatype type; MPI_Status *status; struct _Comm *comm; rb_scan_args(argc, argv, "32", &rb_obj, &rb_source, &rb_tag, &rb_len, &rb_offset); if (rb_len != Qnil) { len = NUM2INT(rb_len); } if (rb_offset != Qnil) { offset = NUM2INT(rb_offset); } OBJ2C(rb_obj, len, buffer, type, offset); source = NUM2INT(rb_source); tag = NUM2INT(rb_tag); Data_Get_Struct(self, struct _Comm, comm); status = ALLOC(MPI_Status); check_error(MPI_Recv(buffer, len, type, source, tag, comm->Comm, status)); return Data_Wrap_Struct(cStatus, NULL, Status_free, status); } static VALUE rb_comm_irecv(int argc, VALUE *argv, VALUE self) { VALUE rb_obj, rb_source, rb_tag; VALUE rb_len, rb_offset; // option void* buffer; int source, tag, len = 0, offset = 0; MPI_Datatype type; struct _Comm *comm; struct _Request *request; VALUE rb_request; rb_scan_args(argc, argv, "32", &rb_obj, &rb_source, &rb_tag, &rb_len, &rb_offset); if (rb_len != Qnil) { len = NUM2INT(rb_len); } if (rb_offset != Qnil) { offset = NUM2INT(rb_offset); } OBJ2C(rb_obj, len, buffer, type, offset); source = NUM2INT(rb_source); tag = NUM2INT(rb_tag); Data_Get_Struct(self, struct _Comm, comm); rb_request = Data_Make_Struct(cRequest, struct _Request, NULL, Request_free, request); request->free = true; check_error(MPI_Irecv(buffer, len, type, source, tag, comm->Comm, &(request->Request))); return rb_request; } static VALUE rb_comm_gather(VALUE self, VALUE rb_sendbuf, VALUE rb_recvbuf, VALUE rb_root) { void *sendbuf, *recvbuf = NULL; int sendcount=0, recvcount = 0; MPI_Datatype sendtype, recvtype = 0; int root, rank, size; struct _Comm *comm; OBJ2C(rb_sendbuf, sendcount, sendbuf, sendtype, 0); root = NUM2INT(rb_root); Data_Get_Struct(self, struct _Comm, comm); check_error(MPI_Comm_rank(comm->Comm, &rank)); check_error(MPI_Comm_size(comm->Comm, &size)); if (rank == root) { OBJ2C(rb_recvbuf, recvcount, recvbuf, recvtype, 0); if (recvcount < sendcount*size) rb_raise(rb_eArgError, "recvbuf is too small"); recvcount = sendcount; } check_error(MPI_Gather(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, comm->Comm)); return Qnil; } static VALUE rb_comm_allgather(VALUE self, VALUE rb_sendbuf, VALUE rb_recvbuf) { void *sendbuf, *recvbuf; int sendcount=0, recvcount=0; MPI_Datatype sendtype, recvtype; int rank, size; struct _Comm *comm; OBJ2C(rb_sendbuf, sendcount, sendbuf, sendtype, 0); Data_Get_Struct(self, struct _Comm, comm); check_error(MPI_Comm_rank(comm->Comm, &rank)); check_error(MPI_Comm_size(comm->Comm, &size)); OBJ2C(rb_recvbuf, recvcount, recvbuf, recvtype, 0); if (recvcount < sendcount*size) rb_raise(rb_eArgError, "recvbuf is too small"); recvcount = sendcount; check_error(MPI_Allgather(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm->Comm)); return Qnil; } static VALUE rb_comm_bcast(VALUE self, VALUE rb_buffer, VALUE rb_root) { void *buffer; int count=0; MPI_Datatype type; int root; struct _Comm *comm; OBJ2C(rb_buffer, count, buffer, type, 0); root = NUM2INT(rb_root); Data_Get_Struct(self, struct _Comm, comm); check_error(MPI_Bcast(buffer, count, type, root, comm->Comm)); return Qnil; } static VALUE rb_comm_scatter(VALUE self, VALUE rb_sendbuf, VALUE rb_recvbuf, VALUE rb_root) { void *sendbuf = NULL, *recvbuf; int sendcount = 0, recvcount=0; MPI_Datatype sendtype = 0, recvtype; int root, rank, size; struct _Comm *comm; OBJ2C(rb_recvbuf, recvcount, recvbuf, recvtype, 0); root = NUM2INT(rb_root); Data_Get_Struct(self, struct _Comm, comm); check_error(MPI_Comm_rank(comm->Comm, 
&rank)); check_error(MPI_Comm_size(comm->Comm, &size)); if (rank == root) { OBJ2C(rb_sendbuf, sendcount, sendbuf, sendtype, 0); if (sendcount > recvcount*size) rb_raise(rb_eArgError, "recvbuf is too small"); sendcount = recvcount; } check_error(MPI_Scatter(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, comm->Comm)); return Qnil; } static VALUE rb_comm_sendrecv(VALUE self, VALUE rb_sendbuf, VALUE rb_dest, VALUE rb_sendtag, VALUE rb_recvbuf, VALUE rb_source, VALUE rb_recvtag) { void *sendbuf, *recvbuf; int sendcount=0, recvcount=0; MPI_Datatype sendtype, recvtype; int dest, source; int sendtag, recvtag; int size; struct _Comm *comm; MPI_Status *status; OBJ2C(rb_sendbuf, sendcount, sendbuf, sendtype, 0); Data_Get_Struct(self, struct _Comm, comm); check_error(MPI_Comm_size(comm->Comm, &size)); OBJ2C(rb_recvbuf, recvcount, recvbuf, recvtype, 0); dest = NUM2INT(rb_dest); source = NUM2INT(rb_source); sendtag = NUM2INT(rb_sendtag); recvtag = NUM2INT(rb_recvtag); status = ALLOC(MPI_Status); check_error(MPI_Sendrecv(sendbuf, sendcount, sendtype, dest, sendtag, recvbuf, recvcount, recvtype, source, recvtag, comm->Comm, status)); return Data_Wrap_Struct(cStatus, NULL, Status_free, status); } static VALUE rb_comm_alltoall(VALUE self, VALUE rb_sendbuf, VALUE rb_recvbuf) { void *sendbuf, *recvbuf; int sendcount=0, recvcount=0; MPI_Datatype sendtype, recvtype; int size; struct _Comm *comm; OBJ2C(rb_sendbuf, sendcount, sendbuf, sendtype, 0); Data_Get_Struct(self, struct _Comm, comm); check_error(MPI_Comm_size(comm->Comm, &size)); OBJ2C(rb_recvbuf, recvcount, recvbuf, recvtype, 0); if (recvcount < sendcount) rb_raise(rb_eArgError, "recvbuf is too small"); recvcount = recvcount/size; sendcount = sendcount/size; check_error(MPI_Alltoall(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm->Comm)); return Qnil; } static VALUE rb_comm_reduce(VALUE self, VALUE rb_sendbuf, VALUE rb_recvbuf, VALUE rb_op, VALUE rb_root) { void *sendbuf, *recvbuf = NULL; int sendcount=0, recvcount = 0; MPI_Datatype sendtype, recvtype = 0; int root, rank, size; struct _Comm *comm; struct _Op *op; OBJ2C(rb_sendbuf, sendcount, sendbuf, sendtype, 0); root = NUM2INT(rb_root); Data_Get_Struct(self, struct _Comm, comm); check_error(MPI_Comm_rank(comm->Comm, &rank)); check_error(MPI_Comm_size(comm->Comm, &size)); if (rank == root) { OBJ2C(rb_recvbuf, recvcount, recvbuf, recvtype, 0); if (recvcount != sendcount) rb_raise(rb_eArgError, "sendbuf and recvbuf has the same length"); if (recvtype != sendtype) rb_raise(rb_eArgError, "sendbuf and recvbuf has the same type"); } Data_Get_Struct(rb_op, struct _Op, op); check_error(MPI_Reduce(sendbuf, recvbuf, sendcount, sendtype, op->Op, root, comm->Comm)); return Qnil; } static VALUE rb_comm_allreduce(VALUE self, VALUE rb_sendbuf, VALUE rb_recvbuf, VALUE rb_op) { void *sendbuf, *recvbuf; int sendcount=0, recvcount=0; MPI_Datatype sendtype, recvtype; int rank, size; struct _Comm *comm; struct _Op *op; OBJ2C(rb_sendbuf, sendcount, sendbuf, sendtype, 0); Data_Get_Struct(self, struct _Comm, comm); check_error(MPI_Comm_rank(comm->Comm, &rank)); check_error(MPI_Comm_size(comm->Comm, &size)); OBJ2C(rb_recvbuf, recvcount, recvbuf, recvtype, 0); if (recvcount != sendcount) rb_raise(rb_eArgError, "sendbuf and recvbuf has the same length"); if (recvtype != sendtype) rb_raise(rb_eArgError, "sendbuf and recvbuf has the same type"); Data_Get_Struct(rb_op, struct _Op, op); check_error(MPI_Allreduce(sendbuf, recvbuf, recvcount, recvtype, op->Op, comm->Comm)); return Qnil; } static VALUE 
rb_comm_get_Errhandler(VALUE self) { struct _Comm *comm; struct _Errhandler *errhandler; VALUE rb_errhandler; Data_Get_Struct(self, struct _Comm, comm); rb_errhandler = Data_Make_Struct(cErrhandler, struct _Errhandler, NULL, Errhandler_free, errhandler); errhandler->free = false; check_error(MPI_Comm_get_errhandler(comm->Comm, &(errhandler->Errhandler))); return rb_errhandler; } static VALUE rb_comm_set_Errhandler(VALUE self, VALUE rb_errhandler) { struct _Comm *comm; struct _Errhandler *errhandler; Data_Get_Struct(self, struct _Comm, comm); Data_Get_Struct(rb_errhandler, struct _Errhandler, errhandler); check_error(MPI_Comm_set_errhandler(comm->Comm, errhandler->Errhandler)); return self; } static VALUE rb_comm_barrier(VALUE self) { struct _Comm *comm; Data_Get_Struct(self, struct _Comm, comm); check_error(MPI_Barrier(comm->Comm)); return self; } // MPI::Request static VALUE rb_request_wait(VALUE self) { MPI_Status *status; struct _Request *request; Data_Get_Struct(self, struct _Request, request); status = ALLOC(MPI_Status); check_error(MPI_Wait(&(request->Request), status)); return Data_Wrap_Struct(cStatus, NULL, Status_free, status); } // MPI::Errhandler static VALUE rb_errhandler_eql(VALUE self, VALUE other) { struct _Errhandler *eh0, *eh1; Data_Get_Struct(self, struct _Errhandler, eh0); Data_Get_Struct(other, struct _Errhandler, eh1); return eh0->Errhandler == eh1->Errhandler ? Qtrue : Qfalse; } // MPI::Status static VALUE rb_status_source(VALUE self) { MPI_Status *status; Data_Get_Struct(self, MPI_Status, status); return INT2NUM(status->MPI_SOURCE); } static VALUE rb_status_tag(VALUE self) { MPI_Status *status; Data_Get_Struct(self, MPI_Status, status); return INT2NUM(status->MPI_TAG); } static VALUE rb_status_error(VALUE self) { MPI_Status *status; Data_Get_Struct(self, MPI_Status, status); return INT2NUM(status->MPI_ERROR); } void Init_mpi() { // MPI mMPI = rb_define_module("MPI"); rb_define_module_function(mMPI, "Init", rb_m_init, -1); rb_define_module_function(mMPI, "Finalize", rb_m_finalize, -1); rb_define_module_function(mMPI, "Abort", rb_m_abort, 2); rb_define_const(mMPI, "VERSION", INT2NUM(MPI_VERSION)); rb_define_const(mMPI, "SUBVERSION", INT2NUM(MPI_SUBVERSION)); rb_define_const(mMPI, "SUCCESS", INT2NUM(MPI_SUCCESS)); rb_define_const(mMPI, "PROC_NULL", INT2NUM(MPI_PROC_NULL)); // MPI::Comm cComm = rb_define_class_under(mMPI, "Comm", rb_cObject); // rb_define_alloc_func(cComm, rb_comm_alloc); rb_define_private_method(cComm, "initialize", rb_comm_initialize, 0); rb_define_method(cComm, "rank", rb_comm_rank, 0); rb_define_method(cComm, "size", rb_comm_size, 0); rb_define_method(cComm, "Send", rb_comm_send, 3); rb_define_method(cComm, "Isend", rb_comm_isend, 3); rb_define_method(cComm, "Recv", rb_comm_recv, -1); rb_define_method(cComm, "Irecv", rb_comm_irecv, -1); rb_define_method(cComm, "Gather", rb_comm_gather, 3); rb_define_method(cComm, "Allgather", rb_comm_allgather, 2); rb_define_method(cComm, "Bcast", rb_comm_bcast, 2); rb_define_method(cComm, "Scatter", rb_comm_scatter, 3); rb_define_method(cComm, "Sendrecv", rb_comm_sendrecv, 6); rb_define_method(cComm, "Alltoall", rb_comm_alltoall, 2); rb_define_method(cComm, "Reduce", rb_comm_reduce, 4); rb_define_method(cComm, "Allreduce", rb_comm_allreduce, 3); rb_define_method(cComm, "Errhandler", rb_comm_get_Errhandler, 0); rb_define_method(cComm, "Errhandler=", rb_comm_set_Errhandler, 1); rb_define_method(cComm, "Barrier", rb_comm_barrier, 0); // MPI::Request cRequest = rb_define_class_under(mMPI, "Request", rb_cObject); 
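  /* Request#Wait blocks until the non-blocking operation started by
     Comm#Isend or Comm#Irecv completes, and returns an MPI::Status. */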
rb_define_method(cRequest, "Wait", rb_request_wait, 0); // MPI::Op cOp = rb_define_class_under(mMPI, "Op", rb_cObject); // MPI::Errhandler cErrhandler = rb_define_class_under(mMPI, "Errhandler", rb_cObject); rb_define_method(cErrhandler, "eql?", rb_errhandler_eql, 1); // MPI::Status cStatus = rb_define_class_under(mMPI, "Status", rb_cObject); rb_define_method(cStatus, "source", rb_status_source, 0); rb_define_method(cStatus, "tag", rb_status_tag, 0); rb_define_method(cStatus, "error", rb_status_error, 0); //MPI::ERR VALUE mERR = rb_define_module_under(mMPI, "ERR"); eBUFFER = rb_define_class_under(mERR, "BUFFER", rb_eStandardError); eCOUNT = rb_define_class_under(mERR, "COUNT", rb_eStandardError); eTYPE = rb_define_class_under(mERR, "TYPE", rb_eStandardError); eTAG = rb_define_class_under(mERR, "TAG", rb_eStandardError); eCOMM = rb_define_class_under(mERR, "COMM", rb_eStandardError); eRANK = rb_define_class_under(mERR, "RANK", rb_eStandardError); eREQUEST = rb_define_class_under(mERR, "REQUEST", rb_eStandardError); eROOT = rb_define_class_under(mERR, "ROOT", rb_eStandardError); eGROUP = rb_define_class_under(mERR, "GROUP", rb_eStandardError); eOP = rb_define_class_under(mERR, "OP", rb_eStandardError); eTOPOLOGY = rb_define_class_under(mERR, "TOPOLOGY", rb_eStandardError); eDIMS = rb_define_class_under(mERR, "DIMS", rb_eStandardError); eARG = rb_define_class_under(mERR, "ARG", rb_eStandardError); eUNKNOWN = rb_define_class_under(mERR, "UNKNOWN", rb_eStandardError); eTRUNCATE = rb_define_class_under(mERR, "TRUNCATE", rb_eStandardError); eOTHER = rb_define_class_under(mERR, "OTHER", rb_eStandardError); eINTERN = rb_define_class_under(mERR, "INTERN", rb_eStandardError); eIN_STATUS = rb_define_class_under(mERR, "IN_STATUS", rb_eStandardError); ePENDING = rb_define_class_under(mERR, "PENDING", rb_eStandardError); eACCESS = rb_define_class_under(mERR, "ACCESS", rb_eStandardError); eAMODE = rb_define_class_under(mERR, "AMODE", rb_eStandardError); eASSERT = rb_define_class_under(mERR, "ASSERT", rb_eStandardError); eBAD_FILE = rb_define_class_under(mERR, "BAD_FILE", rb_eStandardError); eBASE = rb_define_class_under(mERR, "BASE", rb_eStandardError); eCONVERSION = rb_define_class_under(mERR, "CONVERSION", rb_eStandardError); eDISP = rb_define_class_under(mERR, "DISP", rb_eStandardError); eDUP_DATAREP = rb_define_class_under(mERR, "DUP_DATAREP", rb_eStandardError); eFILE_EXISTS = rb_define_class_under(mERR, "FILE_EXISTS", rb_eStandardError); eFILE_IN_USE = rb_define_class_under(mERR, "FILE_IN_USE", rb_eStandardError); eFILE = rb_define_class_under(mERR, "FILE", rb_eStandardError); eINFO_KEY = rb_define_class_under(mERR, "INFO_KEY", rb_eStandardError); eINFO_NOKEY = rb_define_class_under(mERR, "INFO_NOKEY", rb_eStandardError); eINFO_VALUE = rb_define_class_under(mERR, "INFO_VALUE", rb_eStandardError); eINFO = rb_define_class_under(mERR, "INFO", rb_eStandardError); eIO = rb_define_class_under(mERR, "IO", rb_eStandardError); eKEYVAL = rb_define_class_under(mERR, "KEYVAL", rb_eStandardError); eLOCKTYPE = rb_define_class_under(mERR, "LOCKTYPE", rb_eStandardError); eNAME = rb_define_class_under(mERR, "NAME", rb_eStandardError); eNO_MEM = rb_define_class_under(mERR, "NO_MEM", rb_eStandardError); eNOT_SAME = rb_define_class_under(mERR, "NOT_SAME", rb_eStandardError); eNO_SPACE = rb_define_class_under(mERR, "NO_SPACE", rb_eStandardError); eNO_SUCH_FILE = rb_define_class_under(mERR, "NO_SUCH_FILE", rb_eStandardError); ePORT = rb_define_class_under(mERR, "PORT", rb_eStandardError); eQUOTA = 
rb_define_class_under(mERR, "QUOTA", rb_eStandardError); eREAD_ONLY = rb_define_class_under(mERR, "READ_ONLY", rb_eStandardError); eRMA_CONFLICT = rb_define_class_under(mERR, "RMA_CONFLICT", rb_eStandardError); eRMA_SYNC = rb_define_class_under(mERR, "RMA_SYNC", rb_eStandardError); eSERVICE = rb_define_class_under(mERR, "SERVICE", rb_eStandardError); eSIZE = rb_define_class_under(mERR, "SIZE", rb_eStandardError); eSPAWN = rb_define_class_under(mERR, "SPAWN", rb_eStandardError); eUNSUPPORTED_DATAREP = rb_define_class_under(mERR, "UNSUPPORTED_DATAREP", rb_eStandardError); eUNSUPPORTED_OPERATION = rb_define_class_under(mERR, "UNSUPPORTED_OPERATION", rb_eStandardError); eWIN = rb_define_class_under(mERR, "WIN", rb_eStandardError); eLASTCODE = rb_define_class_under(mERR, "LASTCODE", rb_eStandardError); eSYSRESOURCE = rb_define_class_under(mERR, "SYSRESOURCE", rb_eStandardError); } ruby-mpi-0.3.2/ext/mpi/extconf.rb0000644000004100000410000000130613327342425016675 0ustar www-datawww-datarequire "mkmf" CONFIG['CC'] = "mpicc" gem_path = nil begin require "rubygems" if Gem::Specification.respond_to?(:find_by_name) if spec = ( Gem::Specification.find_by_name("numru-narray") || Gem::Specification.find_by_name("narray") ) gem_path = spec.full_gem_path end else if (spec = (Gem.source_index.find_name("numru-narray") || Gem.source_index.find_name("narray")) ).any? gem_path = spec.full_gem_path end end gem_path = File.join(gem_path, "ext", "numru", "narray") rescue LoadError dir_config("narray", RbConfig::CONFIG["sitearchdir"]) end unless find_header("narray.h", gem_path) find_header("narray.h", File.join(gem_path,"src")) end create_makefile("mpi") ruby-mpi-0.3.2/LICENSE.txt0000644000004100000410000000205013327342425015135 0ustar www-datawww-dataCopyright (c) 2011-2016 Seiya Nishizawa Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ruby-mpi-0.3.2/VERSION0000644000004100000410000000000613327342425014361 0ustar www-datawww-data0.3.2 ruby-mpi-0.3.2/ruby-mpi.gemspec0000644000004100000410000000473313327342425016435 0ustar www-datawww-data# Generated by jeweler # DO NOT EDIT THIS FILE DIRECTLY # Instead, edit Jeweler::Tasks in Rakefile, and run 'rake gemspec' # -*- encoding: utf-8 -*- # stub: ruby-mpi 0.3.2 ruby lib # stub: ext/mpi/extconf.rb Gem::Specification.new do |s| s.name = "ruby-mpi" s.version = "0.3.2" s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? 
:required_rubygems_version=
  s.require_paths = ["lib"]
  s.authors = ["Seiya Nishizawa"]
  s.date = "2016-02-15"
  s.description = "A ruby binding of Message Passing Interface (MPI), which is an API specification that allows processes to communicate with one another by sending and receiving messages."
  s.email = "seiya@gfd-dennou.org"
  s.extensions = ["ext/mpi/extconf.rb"]
  s.extra_rdoc_files = [
    "LICENSE.txt",
    "README.rdoc"
  ]
  s.files = [
    ".document",
    ".rspec",
    ".travis.yml",
    "Gemfile",
    "LICENSE.txt",
    "README.rdoc",
    "Rakefile",
    "VERSION",
    "ext/mpi/extconf.rb",
    "ext/mpi/mpi.c",
    "lib/mpi.rb",
    "lib/mpi/utils.rb",
    "ruby-mpi.gemspec",
    "samples/hello.rb",
    "samples/narray.rb",
    "samples/narray_offset.rb",
    "spec/ruby-mpi_spec.rb",
    "spec/spec_helper.rb",
    "test/test_utils.rb"
  ]
  s.homepage = "http://github.com/seiya/ruby-mpi"
  s.licenses = ["MIT"]
  s.rubygems_version = "2.4.8"
  s.summary = "A ruby binding of MPI"

  if s.respond_to? :specification_version then
    s.specification_version = 4

    if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then
      s.add_runtime_dependency(%q<numru-narray>, ["~> 1.0"])
      s.add_development_dependency(%q<rspec>, [">= 2.3.0"])
      s.add_development_dependency(%q<bundler>, [">= 1.0.0"])
      s.add_development_dependency(%q<jeweler>, [">= 1.5.2"])
      s.add_development_dependency(%q<simplecov>, [">= 0"])
      s.add_development_dependency(%q<rake-compiler>, [">= 0"])
    else
      s.add_dependency(%q<numru-narray>, ["~> 1.0"])
      s.add_dependency(%q<rspec>, [">= 2.3.0"])
      s.add_dependency(%q<bundler>, [">= 1.0.0"])
      s.add_dependency(%q<jeweler>, [">= 1.5.2"])
      s.add_dependency(%q<simplecov>, [">= 0"])
      s.add_dependency(%q<rake-compiler>, [">= 0"])
    end
  else
    s.add_dependency(%q<numru-narray>, ["~> 1.0"])
    s.add_dependency(%q<rspec>, [">= 2.3.0"])
    s.add_dependency(%q<bundler>, [">= 1.0.0"])
    s.add_dependency(%q<jeweler>, [">= 1.5.2"])
    s.add_dependency(%q<simplecov>, [">= 0"])
    s.add_dependency(%q<rake-compiler>, [">= 0"])
  end
end