Параллельное программирование с использованием MPI. Антонов (2004) (Параллельное программирование с использованием MPI. Антонов (2004).pdf), страница 3
Описание файла
PDF-файл из архива "Параллельное программирование с использованием MPI. Антонов (2004).pdf", который расположен в категории "". Всё это находится в предмете "суперкомпьютерное моделирование и технологии" из 11 семестра (3 семестр магистратуры), которые можно найти в файловом архиве МГУ им. Ломоносова. Несмотря на прямую связь этого архива с МГУ им. Ломоносова, его также можно найти и в других разделах.
Просмотр PDF-файла онлайн
Текст 3 страницы из PDF
0) thencall MPI_BUFFER_ATTACH(buf, BUFSIZE, ierr)call MPI_BSEND(rank, 1, MPI_INTEGER, 1, 5,&MPI_COMM_WORLD, ierr)call MPI_BUFFER_DETACH(buf, ibufsize, ierr)end ifif(rank .eq. 1) thencall MPI_RECV(rbuf, 1, MPI_INTEGER, 0, 5,&MPI_COMM_WORLD, status, ierr)print *, 'Process 1 received ', rbuf, ' from process ',&status(MPI_SOURCE)end ifcall MPI_FINALIZE(ierr)endMPI_RECV(BUF, COUNT, DATATYPE, SOURCE, MSGTAG, COMM, STATUS,IERR)<type> BUF(*)INTEGER COUNT, DATATYPE, SOURCE, MSGTAG, COMM, IERR,STATUS(MPI_STATUS_SIZE);ehdbjmxsbcijb_f\[mn_j BUFg_[he__ COUNTwe_f_glh\khh[s_gbylbiZDATATYPEkb^_glbnbdZlhjhf MSGTAG hlijhp_kkZkghf_jhf SOURCE\dhffmgbdZlhj_ COMMkaZiheg_gb_ffZkkb\ZZljb[mlh\ijboh^ys_]hkhh[s_gbySTATUS?kebqbkehj_Zevghijbgyluowe_f_glh\f_gvr_agZq_gby COUNTlh]ZjZglbjm_lky qlh \ [mn_j_ BUF baf_gylky lhevdh we_f_glukhhl\_lkl\mxsb_ we_f_glZf ijbgylh]h khh[s_gby ?keb dhebq_kl\hwe_f_glh\\ijbgbfZ_fhfkhh[s_gbb[hevr_agZq_gby COUNTlh\hagbdZ_lhrb[dZ i_j_iheg_gby Qlh[u ba[_`Zlv wlh]h fh`gh kgZqZeZ hij_^_eblvkljmdlmjm ijboh^ys_]h khh[s_gby ijb ihfhsb ijhp_^mju MPI_PROBE(MPI_IPROBE ?keb gm`gh magZlv lhqgh_ qbkeh we_f_glh\ \ ijbgbfZ_fhfkhh[s_gbb lh fh`gh \hkihevah\Zlvky ijhp_^mjhc MPI_GET_COUNT.;ehdbjh\dZ ]ZjZglbjm_l qlh ihke_ \ha\jZlZ ba ijhp_^mju MPI_RECV \k_we_f_glukhh[s_gbym`_[m^mlijbgylubjZkiheh`_gu\[mn_j_BUF.Gb`_ijb\_^_gijbf_jijh]jZffu\dhlhjhcgme_\hcijhp_kkihkueZ_lkhh[s_gb_ ijhp_kkm k ghf_jhf h^bg b `^_l hl g_]h hl\_lZ ?keb ijh]jZffZ[m^_l aZims_gZ k [hevrbf qbkehf ijhp_kkh\ lh j_Zevgh \uihegylv i_j_kuedb\k_jZ\ghklZgmllhevdhgme_\hcbi_j\ucijhp_kkuHklZevgu_ijhp_kkuihke_bobgbpbZebaZpbbijhp_^mjhc MPI_INITgZi_qZlZxlgZqZevgu_16agZq_gby i_j_f_gguo a b b ihke_ q_]h aZ\_jrZlky \uihegb\ ijhp_^mjmMPI_FINALIZE.program example5include 'mpif.h'integer ierr, size, rankreal a, binteger status(MPI_STATUS_SIZE)call MPI_INIT(ierr)call MPI_COMM_SIZE(MPI_COMM_WORLD, size, ierr)call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)a = 0.0b = 0.0if(rank .eq.
0) thenb = 1.0call MPI_SEND(b, 1, MPI_REAL, 1, 5,&MPI_COMM_WORLD, ierr);call MPI_RECV(a, 1, MPI_REAL, 1, 5,&MPI_COMM_WORLD, status, ierr);elseif(rank .eq. 1) thena = 2.0call MPI_RECV(b, 1, MPI_REAL, 0, 5,&MPI_COMM_WORLD, status, ierr);call MPI_SEND(a, 1, MPI_REAL, 0, 5,&MPI_COMM_WORLD, ierr);end ifend ifprint *, 'process ', rank,' a = ', a, ', b = ', bcall MPI_FINALIZE(ierr)end<ke_^mxs_fijbf_j_dZ`^ucijhp_kkkq_lgufghf_jhfihkueZ_lkhh[s_gb_ k\h_fm khk_^m k ghf_jhf gZ _^bgbpm [hevrbf >hihegbl_evgh ihklZ\e_gZ ijh\_jdZ ^ey ijhp_kkZ k fZdkbfZevguf ghf_jhf qlh[u hg g_ ihkeZekhh[s_gb_ g_kms_kl\mxs_fm ijhp_kkm AgZq_gby i_j_f_gghc b baf_gylkylhevdhgZijhp_kkZokg_q_lgufbghf_jZfbprogram example6include 'mpif.h'integer ierr, size, rank, a, binteger status(MPI_STATUS_SIZE)call MPI_INIT(ierr)call MPI_COMM_SIZE(MPI_COMM_WORLD, size, ierr)call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)a = rankb = -1if(mod(rank, 2) .eq.
0) then
if(rank+1 .lt. size) then
! посылают все процессы, кроме последнего
call MPI_Send(a, 1, MPI_INTEGER, rank+1, 5,
&MPI_COMM_WORLD, ierr);
end if
else
call MPI_Recv(b, 1, MPI_INTEGER, rank-1, 5,
&MPI_COMM_WORLD, status, ierr);
end if
print *, 'process ', rank,' a = ', a, ', b = ', b
call MPI_FINALIZE(ierr)
end

При приеме сообщения вместо аргументов SOURCE и MSGTAG можно использовать следующие предопределенные константы:
• MPI_ANY_SOURCE — признак того, что подходит сообщение от любого процесса;
• MPI_ANY_TAG — признак того, что подходит сообщение с любым идентификатором.
При одновременном использовании этих двух констант будет принято сообщение с любым идентификатором от любого процесса.
Реальные атрибуты принятого сообщения всегда можно определить по соответствующим элементам массива status. В Фортране параметр status является целочисленным массивом размера MPI_STATUS_SIZE.
Константы MPI_SOURCE, MPI_TAG и MPI_ERROR являются индексами по данному массиву для доступа к значениям соответствующих полей:
• status(MPI_SOURCE) — номер процесса-отправителя сообщения;
• status(MPI_TAG) — идентификатор сообщения;
• status(MPI_ERROR) — код ошибки.
В языке Си параметр status является структурой предопределенного типа MPI_Status с полями MPI_SOURCE, MPI_TAG и MPI_ERROR.
Обратим внимание на некоторую несимметричность операций посылки и приема сообщений. С помощью константы MPI_ANY_SOURCE можно принять сообщение от любого процесса. Однако в случае посылки данных требуется явно указать номер принимающего процесса.
В стандарте оговорено, что если один процесс последовательно посылает два сообщения, соответствующие одному и тому же вызову MPI_RECV, другому процессу, то первым будет принято сообщение, которое было отправлено раньше. Вместе с тем, если два сообщения были одновременно отправлены разными процессами, то порядок их получения принимающим процессом заранее не определен.

MPI_GET_COUNT(STATUS, DATATYPE, COUNT, IERR)
INTEGER COUNT, DATATYPE, IERR, STATUS(MPI_STATUS_SIZE)

По значению параметра STATUS процедура определяет число COUNT уже принятых (после обращения к MPI_RECV) или принимаемых (после обращения к MPI_PROBE или MPI_IPROBE) элементов сообщения типа DATATYPE. Данная процедура, в частности, необходима для определения размера области памяти, выделяемой для хранения принимаемого сообщения.

MPI_PROBE(SOURCE, MSGTAG, COMM, STATUS, IERR)
INTEGER SOURCE, MSGTAG, COMM, IERR, STATUS(MPI_STATUS_SIZE)

Получение в массиве STATUS информации о структуре ожидаемого сообщения с идентификатором MSGTAG от процесса с номером SOURCE в коммуникаторе COMM с блокировкой. Возврата из процедуры не произойдет до тех пор, пока сообщение с подходящим идентификатором и номером процесса-отправителя не будет доступно для получения. Следует особо обратить внимание на то, что процедура определяет только факт прихода сообщения, но реально его не принимает. Если после вызова MPI_PROBE вызывается MPI_RECV с такими же параметрами, то будет принято то
же самое сообщение, информация о котором была получена с помощью вызова процедуры MPI_PROBE.
Следующий пример демонстрирует применение процедуры MPI_PROBE для определения структуры приходящего сообщения. Процесс 0 ждет сообщения от любого из процессов 1 и 2 с одним и тем же тегом. Однако посылаемые этими процессами данные имеют разный тип. Для того чтобы определить, в какую переменную помещать приходящее сообщение, процесс сначала при помощи вызова MPI_PROBE определяет, от кого же именно поступило это сообщение. Следующий непосредственно после MPI_PROBE вызов MPI_RECV гарантированно примет нужное сообщение, после чего принимается сообщение от другого процесса.

program example7
include 'mpif.h'
integer rank, ierr, ibuf, status(MPI_STATUS_SIZE)
real rbuf
call MPI_INIT(ierr)
call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)
ibuf = rank
rbuf = 1.0 * rank
if(rank .eq.
1) call MPI_SEND(ibuf, 1, MPI_INTEGER, 0, 5,&MPI_COMM_WORLD, ierr)if(rank .eq. 2) call MPI_SEND(rbuf, 1, MPI_REAL, 0, 5,&MPI_COMM_WORLD, ierr)if(rank .eq. 0) thencall MPI_PROBE(MPI_ANY_SOURCE, 5, MPI_COMM_WORLD,&status, ierr)19if(status(MPI_SOURCE) .EQ. 1) thencall MPI_RECV(ibuf, 1, MPI_INTEGER, 1, 5,&MPI_COMM_WORLD, status, ierr)call MPI_RECV(rbuf, 1, MPI_REAL, 2, 5,&MPI_COMM_WORLD, status, ierr)elseif(status(MPI_SOURCE) .EQ. 2) thencall MPI_RECV(rbuf, 1, MPI_REAL, 2, 5,&MPI_COMM_WORLD, status, ierr)call MPI_RECV(ibuf, 1, MPI_INTEGER, 1, 5,&MPI_COMM_WORLD, status, ierr)end ifend ifprint *, 'Process 0 recv ', ibuf, ' from process 1, ',&rbuf, ' from process 2'end ifcall MPI_FINALIZE(ierr)end<ke_^mxs_fijbf_j_fh^_ebjm_lkyihke_^h\Zl_evguch[f_gkhh[s_gbyfbf_`^m^\mfyijhp_kkZfbaZf_jy_lky\j_fygZh^gmbl_jZpbxh[f_gZhij_^_ey_lkyaZ\bkbfhklv\j_f_gbh[f_gZhl^ebgukhh[s_gbyLZdbfh[jZahfhij_^_eyxlky [Zah\u_ oZjZdl_jbklbdb dhffmgbdZpbhgghc k_lb iZjZee_evgh]h dhfivxl_jZ eZl_glghklv \j_fy gZ i_j_^Zqm khh[s_gby gme_\hc^ebgubfZdkbfZevgh^hklb`bfZyijhimkdgZykihkh[ghklvdhebq_kl\hf_]Z[Zcl\k_dmg^mdhffmgbdZpbhgghck_lbZlZd`_^ebgZkhh[s_gbcgZdhlhjhchgZ^hklb]Z_lkyDhgklZglZNMAXaZ^Z_lh]jZgbq_gb_gZfZdkbfZevgmx^ebgm ihkueZ_fh]h khh[s_gby Z dhgklZglZ NTIMES hij_^_ey_l dhebq_kl\hih\lhj_gbc ^ey mkj_^g_gby j_amevlZlZ KgZqZeZ ihkueZ_lky khh[s_gb_gme_\hc ^ebgu ^ey hij_^_e_gby eZl_glghklb aZl_f ^ebgZ khh[s_gbcm^\Zb\Z_lkygZqbgZykihkuedbh^gh]hwe_f_glZlbiZreal*8.program example8include 'mpif.h'integer ierr, rank, size, i, n, lmax, NMAX, NTIMESparameter (NMAX = 1 000 000, NTIMES = 10)double precision time_start, time, bandwidth, maxreal*8 a(NMAX)integer status(MPI_STATUS_SIZE)call MPI_INIT(ierr)call MPI_COMM_SIZE(MPI_COMM_WORLD, size, ierr)call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)time_start = MPI_WTIME(ierr)n = 0max = 0.0lmax = 0do while(n .le.
NMAX)time_start = MPI_WTIME(ierr)20do i = 1, NTIMESif(rank .eq. 0) thencall MPI_SEND(a, n, MPI_DOUBLE_PRECISION, 1, 1,&MPI_COMM_WORLD, ierr)call MPI_RECV(a, n, MPI_DOUBLE_PRECISION, 1, 1,&MPI_COMM_WORLD, status, ierr)end ifif(rank .eq. 1) thencall MPI_RECV(a, n, MPI_DOUBLE_PRECISION, 0, 1,&MPI_COMM_WORLD, status, ierr)call MPI_SEND(a, n, MPI_DOUBLE_PRECISION, 0, 1,&MPI_COMM_WORLD, ierr)end ifenddotime = (MPI_WTIME(ierr)-time_start)/2/NTIMESbandwidth = (8*n*1.d0/(2**20))/timeif(max .lt. bandwidth) thenmax = bandwidthlmax = 8*nend ifif(rank .eq.