#include "alm_map_tools.h"
#include "alm.h"
#include "fftpack_support.h"
#include "ylmgen.h"
#include "xcomplex.h"

using namespace std;

namespace {

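// Orderings used when pairing rings: info_comparator sorts rings by
// sin(theta); pair_comparator sorts ring pairs by the number of pixels
// per ring and then by the azimuth of the first pixel.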
struct info_comparator
  {
  inline bool operator()(const ringinfo &a, const ringinfo &b)
    { return a.sth<b.sth; }
  };

struct pair_comparator
  {
  inline bool operator()(const ringpair &a, const ringpair &b)
    {
    if (a.r1.nph==b.r1.nph)
      return a.r1.phi0<b.r1.phi0;
    return a.r1.nph<b.r1.nph;
    }
  };

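// Precomputes, for fixed m, the l-dependent recurrence factors
// 2*sqrt((2l+1)/(2l-1)*(l^2-m^2)) that couple Y_(l-1)m into the
// polarisation quantities lambda_w and lambda_x (see SETUP_LAMBDA below).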
void init_lam_fact_1d (int m, arr<double> &lam_fact)
  {
  for (int l=m; l<lam_fact.size(); ++l)
    lam_fact[l] = (l<2) ? 0. : 2*sqrt((2*l+1.)/(2*l-1.) * (l*l-m*m));
  }

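// Same factors without the leading factor 2; used by alm2map_der1 for the
// theta-derivative of the map.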
void init_lam_fact_deriv_1d (int m, arr<double> &lam_fact)
  {
  lam_fact[m]=0;
  for (int l=m+1; l<lam_fact.size(); ++l)
    lam_fact[l] = sqrt((2*l+1.)/(2*l-1.) * (l*l-m*m));
  }

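// Normalisation sqrt(1/((l+2)(l+1)l(l-1))) applied to the G and C
// polarisation multipoles; it vanishes for l<2.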
void init_normal_l (arr<double> &normal_l)
  {
  for (int l=0; l<normal_l.size(); ++l)
    normal_l[l] = (l<2) ? 0. : sqrt(1./((l+2.)*(l+1.)*l*(l-1.)));
  }

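// Splits nrings rings into chunks of roughly max(100, nrings/10) rings each,
// which bounds the size of the temporary per-chunk phase arrays.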
void get_chunk_info (int nrings, int &nchunks, int &chunksize)
  {
  nchunks = nrings/max(100,nrings/10) + 1;
  chunksize = (nrings+nchunks-1)/nchunks;
  }

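// Per-thread helper converting between the samples of one ring and its
// Fourier phases: phase2ring/phase2pair perform the backward FFT used in
// map synthesis, ring2phase/pair2phase the forward FFT used in analysis.
// update() (re)builds the FFT plan for the ring length nph and, for rings
// not starting at phi=0, the phase factors exp(i*m*phi0).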
class ringhelper
  {
  private:
    double phi0_;
    arr<xcomplex<double> > shiftarr, work;
    rfft plan;
    bool norot;

    void update(int nph, int mmax, double phi0)
      {
      norot = (abs(phi0)<1e-14);
      if (!norot)
        {
        if ((mmax!=shiftarr.size()-1) || (!approx(phi0,phi0_,1e-12)))
          {
          shiftarr.alloc(mmax+1);
          phi0_=phi0;
          for (int m=0; m<=mmax; ++m)
            shiftarr[m].Set (cos(m*phi0),sin(m*phi0));
          }
        }
      if (nph!=plan.size())
        plan.Set(nph);
      if (nph>work.size())
        work.alloc(2*nph);
      }

  public:
    ringhelper() : phi0_(0), norot(true) {}

    template<typename T> void phase2ring (int nph, int mmax, double phi0,
      const xcomplex<double> *phase, T *ring)
      {
      update (nph, mmax, phi0);

      for (int m=1; m<nph; ++m) work[m]=0;
      work[0]=phase[0];

      if (norot)
        for (int m=1; m<=mmax; ++m)
          {
          work[m%nph] += phase[m];
          work[nph-1-((m-1)%nph)] += conj(phase[m]);
          }
      else
        for (int m=1; m<=mmax; ++m)
          {
          xcomplex<double> tmp = phase[m]*shiftarr[m];
          work[m%nph] += tmp;
          work[nph-1-((m-1)%nph)] += conj(tmp);
          }

      plan.backward_c(work);
      for (int m=0; m<nph; ++m) ring[m] = work[m].re;
      }
    template<typename T> void phase2ring (int mmax,
      const xcomplex<double> *phase, const ringinfo &info, T *data)
      {
      if (info.nph>0)
        phase2ring (info.nph, mmax, info.phi0, phase, data+info.ofs);
      }
    template<typename T> void phase2pair (int mmax,
      const xcomplex<double> *phase1, const xcomplex<double> *phase2,
      const ringpair &pair, T *data)
      {
      phase2ring (mmax, phase1, pair.r1, data);
      phase2ring (mmax, phase2, pair.r2, data);
      }

    template<typename T> void ring2phase (int nph, int mmax, double phi0,
      double weight, const T *ring, xcomplex<double> *phase)
      {
      update (nph, mmax, -phi0);
      for (int m=0; m<nph; ++m) work[m] = ring[m]*weight;
      plan.forward_c(work);

      if (norot)
        for (int m=0; m<=mmax; ++m)
          phase[m] = work[m%nph];
      else
        for (int m=0; m<=mmax; ++m)
          phase[m] = work[m%nph]*shiftarr[m];
      }
    template<typename T> void ring2phase (int mmax, const ringinfo &info,
      const T *data, xcomplex<double> *phase)
      {
      if (info.nph>0)
        ring2phase (info.nph, mmax, info.phi0, info.weight, data+info.ofs,
          phase);
      }
    template<typename T> void pair2phase (int mmax, const ringpair &pair,
      const T *data, xcomplex<double> *phase1, xcomplex<double> *phase2)
      {
      ring2phase (mmax, pair.r1, data, phase1);
      ring2phase (mmax, pair.r2, data, phase2);
      }
  };

}


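// Groups rings into pairs that are symmetric about the equator
// (cos(theta) of one ring equals -cos(theta) of the other), so that the
// north/south symmetry of the Y_lm can be exploited; rings without a
// mirror partner become pairs with an empty second ring. The result is
// sorted by ring length and phi0 to maximise FFT plan reuse in ringhelper.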
void info2pair(const vector<ringinfo> &info, vector<ringpair> &pair)
  {
  pair.clear();
  vector<ringinfo> info2=info;
  sort(info2.begin(),info2.end(),info_comparator());
  unsigned int pos=0;
  while (pos+1<info2.size())
    {
    if (approx(info2[pos].cth,-info2[pos+1].cth,1e-12))
      {
      pair.push_back(ringpair(info2[pos],info2[pos+1]));
      pos+=2;
      }
    else
      {
      pair.push_back(ringpair(info2[pos]));
      ++pos;
      }
    }
  if (pos<info2.size())
    pair.push_back(ringpair(info2[pos]));

  sort(pair.begin(),pair.end(),pair_comparator());
  }


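// Adds the contribution of one phase value to alm_tmp at the current l and
// advances l; used inside the l-loops of map2alm.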
#define MAP2ALM_MACRO(px) \
  { \
  alm_tmp[l].re += px.re*Ylm[l]; \
  alm_tmp[l].im += px.im*Ylm[l]; \
  ++l; \
  }

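// Scalar (temperature-only) map analysis. For each chunk of ring pairs the
// rings are first Fourier-transformed into phases (parallelised over rings);
// the phases are then folded into the a_lm via a Y_lm recursion in l
// (parallelised over m). For a full pair, p1 and p2 hold the sum and the
// difference of the northern and southern phases, which contribute to the
// even and odd (l-m) multipoles respectively.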
template<typename T> void map2alm (const vector<ringpair> &pair,
  const T *map, Alm<xcomplex<T> > &alm, bool add_alm)
  {
  int lmax = alm.Lmax(), mmax = alm.Mmax();

  int nchunks, chunksize;
  get_chunk_info(pair.size(),nchunks,chunksize);
  arr2<xcomplex<double> > phas1(chunksize,mmax+1), phas2(chunksize,mmax+1);

  if (!add_alm) alm.SetToZero();

  for (int chunk=0; chunk<nchunks; ++chunk)
    {
    int llim=chunk*chunksize, ulim=min(llim+chunksize,int(pair.size()));

#pragma omp parallel
{
    ringhelper helper;

    int ith;
#pragma omp for schedule(dynamic,1)
    for (ith=llim; ith<ulim; ++ith)
      helper.pair2phase(mmax,pair[ith],map,phas1[ith-llim],phas2[ith-llim]);
}

#pragma omp parallel
{
    Ylmgen generator(lmax,mmax,1e-30);
    arr<double> Ylm;
    arr<xcomplex<double> > alm_tmp(lmax+1);
    int m;
#pragma omp for schedule(dynamic,1)
    for (m=0; m<=mmax; ++m)
      {
      for (int l=m; l<=lmax; ++l) alm_tmp[l].Set(0.,0.);
      for (int ith=0; ith<ulim-llim; ++ith)
        {
        int l;
        generator.get_Ylm(pair[ith+llim].r1.cth,pair[ith+llim].r1.sth,m,Ylm,l);
        if (l<=lmax)
          {
          if (pair[ith+llim].r2.nph>0)
            {
            xcomplex<double> p1 = phas1[ith][m]+phas2[ith][m],
                             p2 = phas1[ith][m]-phas2[ith][m];

            if ((l-m)&1)
              MAP2ALM_MACRO(p2)
            for (;l<lmax;)
              {
              MAP2ALM_MACRO(p1)
              MAP2ALM_MACRO(p2)
              }
            if (l==lmax)
              MAP2ALM_MACRO(p1)
            }
          else
            {
            xcomplex<double> p1 = phas1[ith][m];
            for (;l<=lmax;)
              MAP2ALM_MACRO(p1)
            }
          }
        }
      xcomplex<T> *palm = alm.mstart(m);
      for (int l=m; l<=lmax; ++l)
        { palm[l].re += alm_tmp[l].re; palm[l].im += alm_tmp[l].im; }
      }
}
    }
  }

template void map2alm (const vector<ringpair> &pair,
  const float *map, Alm<xcomplex<float> > &alm, bool add_alm);
template void map2alm (const vector<ringpair> &pair,
  const double *map, Alm<xcomplex<double> > &alm, bool add_alm);

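// Builds lambda_w and lambda_x, the combinations of Y_lm and Y_(l-1)m used
// by the polarised (spin-2) transforms, from the precomputed lam_fact
// factors and the geometry of the current ring.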
#define SETUP_LAMBDA \
  const double t1 = lam_lm1m*lam_fact[l]; \
  const double a_w = (l-m2)*two_on_s2 + l*(l-1); \
  const double a_x = twocth*(l-1)*lam_lm; \
  const double lambda_w = a_w*lam_lm - t1*c_on_s2; \
  const double lambda_x = m_on_s2 * (a_x-t1);

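// Polarised analogue of MAP2ALM_MACRO: accumulates the T phase weighted by
// lam_lm into alm_tmp[l][0], and the Q/U phases weighted by lambda_w and
// lambda_x into the G and C components alm_tmp[l][1] and alm_tmp[l][2].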
#define MAP2ALM_POL_MACRO(Tx,Qx,Qy,Ux,Uy) \
  { \
  double lam_lm1m=lam_lm; \
  lam_lm=Ylm[l]; \
  alm_tmp[l][0].re += Tx.re*lam_lm; alm_tmp[l][0].im += Tx.im*lam_lm; \
  SETUP_LAMBDA \
  alm_tmp[l][1].re += Qx.re*lambda_w - Uy.im*lambda_x; \
  alm_tmp[l][1].im += Qx.im*lambda_w + Uy.re*lambda_x; \
  alm_tmp[l][2].re += Ux.re*lambda_w + Qy.im*lambda_x; \
  alm_tmp[l][2].im += Ux.im*lambda_w - Qy.re*lambda_x; \
  ++l; \
  }

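// Polarised map analysis (T, Q, U -> a_lm^T, a_lm^G, a_lm^C). The structure
// mirrors map2alm; the G and C multipoles additionally receive the normal_l
// normalisation when they are added to the output a_lm.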
template<typename T> void map2alm_pol
  (const vector<ringpair> &pair, const T *mapT, const T *mapQ, const T *mapU,
   Alm<xcomplex<T> > &almT, Alm<xcomplex<T> > &almG, Alm<xcomplex<T> > &almC,
   bool add_alm)
  {
  planck_assert (almT.conformable(almG) && almT.conformable(almC),
    "map2alm_pol: a_lm are not conformable");

  int lmax = almT.Lmax(), mmax = almT.Mmax();

  arr<double> normal_l (lmax+1);
  init_normal_l (normal_l);

  int nchunks, chunksize;
  get_chunk_info(pair.size(),nchunks,chunksize);

  arr2<xcomplex<double> > phas1T(chunksize,mmax+1),phas2T(chunksize,mmax+1),
                          phas1Q(chunksize,mmax+1),phas2Q(chunksize,mmax+1),
                          phas1U(chunksize,mmax+1),phas2U(chunksize,mmax+1);

  if (!add_alm)
    { almT.SetToZero(); almG.SetToZero(); almC.SetToZero(); }

  for (int chunk=0; chunk<nchunks; ++chunk)
    {
    int llim=chunk*chunksize, ulim=min(llim+chunksize,int(pair.size()));

#pragma omp parallel
{
    ringhelper helper;

    int ith;
#pragma omp for schedule(dynamic,1)
    for (ith=llim; ith<ulim; ++ith)
      {
      helper.pair2phase (mmax, pair[ith], mapT,
        phas1T[ith-llim], phas2T[ith-llim]);
      helper.pair2phase (mmax, pair[ith], mapQ,
        phas1Q[ith-llim], phas2Q[ith-llim]);
      helper.pair2phase (mmax, pair[ith], mapU,
        phas1U[ith-llim], phas2U[ith-llim]);
      }
}

#pragma omp parallel
{
    Ylmgen generator(lmax,mmax,1e-30);
    arr<double> Ylm;
    arr<double> lam_fact(lmax+1);
    arr<xcomplex<double>[3]> alm_tmp(lmax+1);
    int m;
#pragma omp for schedule(dynamic,1)
    for (m=0; m<=mmax; ++m)
      {
      init_lam_fact_1d (m,lam_fact);

      for (int l=m; l<alm_tmp.size(); ++l)
        alm_tmp[l][0]=alm_tmp[l][1]=alm_tmp[l][2] = 0;

      for (int ith=0; ith<ulim-llim; ++ith)
        {
        int l;
        double cth=pair[ith+llim].r1.cth, sth=pair[ith+llim].r1.sth;
        generator.get_Ylm(cth,sth,m,Ylm,l);
        if (l<=lmax)
          {
          double one_on_s2 = 1/(sth*sth);
          double c_on_s2 = cth * one_on_s2;
          double two_on_s2 = 2*one_on_s2;
          double twocth = 2*cth;
          int m2 = m*m;
          double m_on_s2 = m*one_on_s2;

          if (pair[ith+llim].r2.nph>0)
            {
            xcomplex<double> T1 = phas1T[ith][m]+phas2T[ith][m],
                             T2 = phas1T[ith][m]-phas2T[ith][m],
                             Q1 = phas1Q[ith][m]+phas2Q[ith][m],
                             Q2 = phas1Q[ith][m]-phas2Q[ith][m],
                             U1 = phas1U[ith][m]+phas2U[ith][m],
                             U2 = phas1U[ith][m]-phas2U[ith][m];

            double lam_lm = 0;
            if ((l-m)&1)
              MAP2ALM_POL_MACRO(T2,Q2,Q1,U2,U1)
            for (;l<lmax;)
              {
              MAP2ALM_POL_MACRO(T1,Q1,Q2,U1,U2)
              MAP2ALM_POL_MACRO(T2,Q2,Q1,U2,U1)
              }
            if (l==lmax)
              MAP2ALM_POL_MACRO(T1,Q1,Q2,U1,U2)
            }
          else
            {
            xcomplex<double> T1 = phas1T[ith][m],
                             Q1 = phas1Q[ith][m],
                             U1 = phas1U[ith][m];
            double lam_lm = 0;
            for (;l<=lmax;)
              MAP2ALM_POL_MACRO(T1,Q1,Q1,U1,U1)
            }
          }
        }
      xcomplex<T> *palmT=almT.mstart(m), *palmG=almG.mstart(m),
                  *palmC=almC.mstart(m);
      for (int l=m;l<=lmax;++l)
        {
        palmT[l].re += alm_tmp[l][0].re;
        palmT[l].im += alm_tmp[l][0].im;
        palmG[l].re += alm_tmp[l][1].re*normal_l[l];
        palmG[l].im += alm_tmp[l][1].im*normal_l[l];
        palmC[l].re += alm_tmp[l][2].re*normal_l[l];
        palmC[l].im += alm_tmp[l][2].im*normal_l[l];
        }
      }
}
    }
  }

template void map2alm_pol (const vector<ringpair> &pair, const float *mapT,
  const float *mapQ, const float *mapU, Alm<xcomplex<float> > &almT,
  Alm<xcomplex<float> > &almG, Alm<xcomplex<float> > &almC, bool add_alm);
template void map2alm_pol (const vector<ringpair> &pair, const double *mapT,
  const double *mapQ, const double *mapU, Alm<xcomplex<double> > &almT,
  Alm<xcomplex<double> > &almG, Alm<xcomplex<double> > &almC, bool add_alm);


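// Adds the contribution of a_lm at the current l to one phase value and
// advances l; used inside the l-loops of alm2map.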
#define ALM2MAP_MACRO(px) \
  { \
  px.re += alm_tmp[l].re*Ylm[l]; \
  px.im += alm_tmp[l].im*Ylm[l]; \
  ++l; \
  }

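// Scalar map synthesis, the inverse of map2alm: for each m the a_lm are
// summed against Y_lm into per-ring phases (parallelised over m), which
// backward FFTs then turn into ring samples (parallelised over rings).
// p1 and p2 hold the even and odd (l-m) parts; the northern and southern
// phases are p1+p2 and p1-p2.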
template<typename T> void alm2map (const Alm<xcomplex<T> > &alm,
  const vector<ringpair> &pair, T *map)
  {
  int lmax = alm.Lmax(), mmax = alm.Mmax();

  int nchunks, chunksize;
  get_chunk_info(pair.size(),nchunks,chunksize);

  arr2<xcomplex<double> > phas1(chunksize,mmax+1), phas2(chunksize,mmax+1);

  for (int chunk=0; chunk<nchunks; ++chunk)
    {
    int llim=chunk*chunksize, ulim=min(llim+chunksize,int(pair.size()));

#pragma omp parallel
{
    Ylmgen generator(lmax,mmax,1e-30);
    arr<double> Ylm;
    arr<xcomplex<double> > alm_tmp(lmax+1);
    int m;
#pragma omp for schedule(dynamic,1)
    for (m=0; m<=mmax; ++m)
      {
      for (int l=m; l<=lmax; ++l)
        alm_tmp[l]=alm(l,m);

      for (int ith=0; ith<ulim-llim; ++ith)
        {
        int l;
        generator.get_Ylm(pair[ith+llim].r1.cth,pair[ith+llim].r1.sth,m,Ylm,l);
        if (l>lmax)
          phas1[ith][m] = phas2[ith][m] = 0;
        else
          {
          if (pair[ith+llim].r2.nph>0)
            {
            xcomplex<double> p1=0, p2=0;

            if ((l-m)&1)
              ALM2MAP_MACRO(p2)
            for (;l<lmax;)
              {
              ALM2MAP_MACRO(p1)
              ALM2MAP_MACRO(p2)
              }
            if (l==lmax)
              ALM2MAP_MACRO(p1)
            phas1[ith][m] = p1+p2; phas2[ith][m] = p1-p2;
            }
          else
            {
            xcomplex<double> p1=0;
            for (;l<=lmax;)
              ALM2MAP_MACRO(p1)
            phas1[ith][m] = p1;
            }
          }
        }
      }
}

#pragma omp parallel
{
    ringhelper helper;
    int ith;
#pragma omp for schedule(dynamic,1)
    for (ith=llim; ith<ulim; ++ith)
      helper.phase2pair (mmax,phas1[ith-llim],phas2[ith-llim],pair[ith],map);
}
    }
  }

template void alm2map (const Alm<xcomplex<float> > &alm,
  const vector<ringpair> &pair, float *map);
template void alm2map (const Alm<xcomplex<double> > &alm,
  const vector<ringpair> &pair, double *map);

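// Polarised analogue of ALM2MAP_MACRO: distributes the T, G and C multipoles
// onto the T, Q and U phases via lam_lm, lambda_w and lambda_x.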
#define ALM2MAP_POL_MACRO(Tx,Qx,Qy,Ux,Uy) \
  { \
  double lam_lm1m = lam_lm; \
  lam_lm = Ylm[l]; \
  Tx.re+=alm_tmp[l][0].re*lam_lm; Tx.im+=alm_tmp[l][0].im*lam_lm; \
  SETUP_LAMBDA \
  Qx.re+=alm_tmp[l][1].re*lambda_w; Qx.im+=alm_tmp[l][1].im*lambda_w; \
  Ux.re-=alm_tmp[l][2].re*lambda_w; Ux.im-=alm_tmp[l][2].im*lambda_w; \
  Qy.re-=alm_tmp[l][2].im*lambda_x; Qy.im+=alm_tmp[l][2].re*lambda_x; \
  Uy.re-=alm_tmp[l][1].im*lambda_x; Uy.im+=alm_tmp[l][1].re*lambda_x; \
  ++l; \
  }

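// Polarised map synthesis (a_lm^T, a_lm^G, a_lm^C -> T, Q, U). The G and C
// input multipoles are pre-multiplied by -normal_l; otherwise the structure
// mirrors alm2map.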
template<typename T> void alm2map_pol (const Alm<xcomplex<T> > &almT,
  const Alm<xcomplex<T> > &almG, const Alm<xcomplex<T> > &almC,
  const vector<ringpair> &pair, T *mapT, T *mapQ, T *mapU)
  {
  int lmax = almT.Lmax(), mmax = almT.Mmax();

  planck_assert (almT.conformable(almG) && almT.conformable(almC),
    "alm2map_pol: a_lm are not conformable");

  arr<double> normal_l (lmax+1);
  init_normal_l (normal_l);

  int nchunks, chunksize;
  get_chunk_info(pair.size(),nchunks,chunksize);

  arr2<xcomplex<double> >
    phas1T(chunksize,mmax+1), phas2T(chunksize,mmax+1),
    phas1Q(chunksize,mmax+1), phas2Q(chunksize,mmax+1),
    phas1U(chunksize,mmax+1), phas2U(chunksize,mmax+1);

  for (int chunk=0; chunk<nchunks; ++chunk)
    {
    int llim=chunk*chunksize, ulim=min(llim+chunksize,int(pair.size()));

#pragma omp parallel
{
    Ylmgen generator(lmax,mmax,1e-30);
    arr<double> Ylm;
    arr<double> lam_fact (lmax+1);
    arr<xcomplex<double>[3]> alm_tmp(lmax+1);
    int m;
#pragma omp for schedule(dynamic,1)
    for (m=0; m<=mmax; ++m)
      {
      int m2 = m*m;
      init_lam_fact_1d (m,lam_fact);
      for (int l=m; l<=lmax; ++l)
        {
        alm_tmp[l][0] = almT(l,m);
        alm_tmp[l][1] = almG(l,m)*(-normal_l[l]);
        alm_tmp[l][2] = almC(l,m)*(-normal_l[l]);
        }
      for (int ith=0; ith<ulim-llim; ++ith)
        {
        double cth=pair[ith+llim].r1.cth, sth=pair[ith+llim].r1.sth;
        int l;
        generator.get_Ylm(cth,sth,m,Ylm,l);
        if (l<=lmax)
          {
          double one_on_s2 = 1/(sth*sth);
          double c_on_s2 = cth * one_on_s2;
          double two_on_s2 = 2*one_on_s2;
          double m_on_s2 = m*one_on_s2;
          double twocth = 2*cth;

          if (pair[ith+llim].r2.nph>0)
            {
            xcomplex<double> T1=0, T2=0, Q1=0, Q2=0, U1=0, U2=0;
            double lam_lm = 0;
            if ((l-m)&1)
              ALM2MAP_POL_MACRO(T2,Q2,Q1,U2,U1)
            for (;l<lmax;)
              {
              ALM2MAP_POL_MACRO(T1,Q1,Q2,U1,U2)
              ALM2MAP_POL_MACRO(T2,Q2,Q1,U2,U1)
              }
            if (l==lmax)
              ALM2MAP_POL_MACRO(T1,Q1,Q2,U1,U2)

            phas1T[ith][m] = T1+T2; phas2T[ith][m] = T1-T2;
            phas1Q[ith][m] =-Q1-Q2; phas2Q[ith][m] =-Q1+Q2;
            phas1U[ith][m] = U1+U2; phas2U[ith][m] = U1-U2;
            }
          else
            {
            xcomplex<double> T1=0, Q1=0, U1=0;
            double lam_lm = 0;
            for (;l<=lmax;)
              { ALM2MAP_POL_MACRO(T1,Q1,Q1,U1,U1) }
            phas1T[ith][m] = T1;
            phas1Q[ith][m] =-Q1;
            phas1U[ith][m] = U1;
            }
          }
        else
          {
          phas1T[ith][m] = phas2T[ith][m] = 0;
          phas1Q[ith][m] = phas2Q[ith][m] = 0;
          phas1U[ith][m] = phas2U[ith][m] = 0;
          }
        }
      }
}

#pragma omp parallel
{
    ringhelper helper;
    int ith;
#pragma omp for schedule(dynamic,1)
    for (ith=llim; ith<ulim; ++ith)
      {
      helper.phase2pair(mmax,phas1T[ith-llim],phas2T[ith-llim],pair[ith],mapT);
      helper.phase2pair(mmax,phas1Q[ith-llim],phas2Q[ith-llim],pair[ith],mapQ);
      helper.phase2pair(mmax,phas1U[ith-llim],phas2U[ith-llim],pair[ith],mapU);
      }
}
    }
  }

template void alm2map_pol (const Alm<xcomplex<float> > &almT,
  const Alm<xcomplex<float> > &almG, const Alm<xcomplex<float> > &almC,
  const vector<ringpair> &pair, float *mapT, float *mapQ, float *mapU);
template void alm2map_pol (const Alm<xcomplex<double> > &almT,
  const Alm<xcomplex<double> > &almG, const Alm<xcomplex<double> > &almC,
  const vector<ringpair> &pair, double *mapT, double *mapQ, double *mapU);

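// Accumulates, at the current l, the map value and the phases of its
// theta- and phi-derivatives from a_lm, using lam_lm, the recurrence factor
// lam_fact[l] and the cotangent of theta; advances l. The 1/sin(theta)
// factor of the phi-derivative is applied by the caller.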
#define ALM2MAP_DER1_MACRO(px,pdthx,pdphx) \
  { \
  double lam_lm1m = lam_lm; \
  lam_lm = Ylm[l]; \
  const double t1 = alm_tmp[l].re*lam_lm; \
  const double t2 = alm_tmp[l].im*lam_lm; \
  const double t3 = l*cotanth; \
  const double t4 = one_on_s*lam_lm1m*lam_fact[l]; \
  px.re+=t1; px.im+=t2; \
  pdthx.re+=t3*t1-t4*alm_tmp[l].re; pdthx.im+=t3*t2-t4*alm_tmp[l].im; \
  pdphx.re-=m*t2; pdphx.im+=m*t1; \
  ++l; \
  }

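// Synthesis of a map together with its derivatives d/dtheta and
// (1/sin(theta)) d/dphi, following the same chunked, OpenMP-parallel
// pattern as alm2map.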
template<typename T> void alm2map_der1 (const Alm<xcomplex<T> > &alm,
  const vector<ringpair> &pair, T *map, T *dth, T *dph)
  {
  int lmax = alm.Lmax(), mmax = alm.Mmax();

  int nchunks, chunksize;
  get_chunk_info(pair.size(),nchunks,chunksize);

  arr2<xcomplex<double> >
    phas1(chunksize,mmax+1), phas2(chunksize,mmax+1),
    phas1dth(chunksize,mmax+1), phas2dth(chunksize,mmax+1),
    phas1dph(chunksize,mmax+1), phas2dph(chunksize,mmax+1);

  for (int chunk=0; chunk<nchunks; ++chunk)
    {
    int llim=chunk*chunksize, ulim=min(llim+chunksize,int(pair.size()));

#pragma omp parallel
{
    Ylmgen generator(lmax,mmax,1e-30);
    arr<double> Ylm;
    arr<double> lam_fact (lmax+1);
    int m;
#pragma omp for schedule(dynamic,1)
    for (m=0; m<=mmax; ++m)
      {
      const xcomplex<T> *alm_tmp=alm.mstart(m);
      init_lam_fact_deriv_1d (m,lam_fact);
      for (int ith=0; ith<ulim-llim; ++ith)
        {
        double cth=pair[ith+llim].r1.cth, sth=pair[ith+llim].r1.sth;
        int l;
        generator.get_Ylm(cth,sth,m,Ylm,l);
        if (l<=lmax)
          {
          double one_on_s = 1/sth;
          double cotanth = cth*one_on_s;

          if (pair[ith+llim].r2.nph>0)
            {
            xcomplex<double> p1=0, p2=0, pth1=0, pth2=0, pph1=0, pph2=0;

            double lam_lm = 0;
            if ((l-m)&1)
              ALM2MAP_DER1_MACRO(p2,pth2,pph2)
            for(;l<lmax;)
              {
              ALM2MAP_DER1_MACRO(p1,pth1,pph1)
              ALM2MAP_DER1_MACRO(p2,pth2,pph2)
              }
            if (l==lmax)
              ALM2MAP_DER1_MACRO(p1,pth1,pph1)

            phas1[ith][m] = p1+p2; phas2[ith][m] = p1-p2;
            phas1dth[ith][m] = pth1+pth2; phas2dth[ith][m] = pth2-pth1;
            phas1dph[ith][m] = (pph1+pph2)*one_on_s;
            phas2dph[ith][m] = (pph1-pph2)*one_on_s;
            }
          else
            {
            xcomplex<double> p1=0, pth1=0, pph1=0;

            double lam_lm = 0;
            for(;l<=lmax;)
              ALM2MAP_DER1_MACRO(p1,pth1,pph1)

            phas1[ith][m] = p1;
            phas1dth[ith][m] = pth1;
            phas1dph[ith][m] = pph1*one_on_s;
            }
          }
        else
          {
          phas1[ith][m] = phas2[ith][m] = 0;
          phas1dth[ith][m] = phas2dth[ith][m] = 0;
          phas1dph[ith][m] = phas2dph[ith][m] = 0;
          }
        }
      }
}

#pragma omp parallel
{
    ringhelper helper;
    int ith;
#pragma omp for schedule(dynamic,1)
    for (ith=llim; ith<ulim; ++ith)
      {
      helper.phase2pair(mmax,phas1[ith-llim],phas2[ith-llim],pair[ith],map);
      helper.phase2pair(mmax,phas1dth[ith-llim],phas2dth[ith-llim],
        pair[ith],dth);
      helper.phase2pair(mmax,phas1dph[ith-llim],phas2dph[ith-llim],
        pair[ith],dph);
      }
}
    }
  }

template void alm2map_der1 (const Alm<xcomplex<float> > &alm,
  const vector<ringpair> &pair, float *map, float *dth, float *dph);
template void alm2map_der1 (const Alm<xcomplex<double> > &alm,
  const vector<ringpair> &pair, double *map, double *dth, double *dph);