I have measured data of Earth's trapped charged particles (the Van Allen belts and the South Atlantic Anomaly) and modelled data (AE8/AP8 or AE9/AP9). I would like to plot everything in one healpy map so I can compare the measured and modelled data in a single image. One way could be to make a black-and-white (grayscale) map from the measured data and then overlay contours of the modelled data on top of it: one set of contours for protons, the other for electrons.
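A minimal sketch of that layout, assuming log-scaled HEALPix maps already exist for the measured data (log_mymap) and for the modelled protons and electrons (hypothetical names log_map_protons and log_map_electrons, all at the same nside), and using the healpix_contour helper from the GitHub repository mentioned in EDIT2 below (it is not a built-in healpy function):

import healpy as hp
import matplotlib.pyplot as plt

# Grayscale background from the measured map
hp.newvisufunc.projview(
    log_mymap,
    cmap='Greys',
    projection_type='hammer',
    flip='geo',
    phi_convention='symmetrical',
    graticule=True,
    graticule_labels=True,
    cbar=False,
    hold=True,
)

# Two contour overlays from the model: one for protons, one for electrons
# (log_map_protons / log_map_electrons are hypothetical modelled maps at the same nside)
healpix_contour(log_map_protons, levels=3, colors='firebrick', linewidths=1.0)
healpix_contour(log_map_electrons, levels=3, colors='navy', linewidths=1.0)
plt.show()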
EDIT2: I found some predefined plotting functions (contours and other utilities), written specifically for HEALPix maps, at this GitHub. They worked well for me!
In the code, I made a logarithmic map of the modelled data, "log_mymap_mod", the same way as I did for the measured data, "log_mymap" (see EDIT1). Then I used those functions from GitHub and called them like this (for how log_mymap_mod can be built, see the sketch after the code below):
# (imports as in EDIT1 below; log_mymap is built in EDIT1, log_mymap_mod the same way)
# Measured map as a semi-transparent background:
MyMap = hp.newvisufunc.projview(
    log_mymap,
    title='map_and_contour',
    coord=["G"],
    graticule=True,
    graticule_labels=True,
    flip='geo',
    cmap=cmap_custom,
    projection_type='hammer',
    phi_convention="symmetrical",
    cbar=False,
    hold=True,
    alpha=0.3,
)

# Modelled map overlaid as contours, using the healpix_contour helper from the GitHub repository:
healpix_contour(log_mymap_mod, levels=3, colors='firebrick', alpha=1.0, linewidths=1.0)
plt.show()
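The construction of log_mymap_mod is not shown above; here is a minimal sketch of building it "the same way" as log_mymap in EDIT1 (per-pixel mean of the modelled flux on the same nside, then log10; the per-pixel averaging mirrors EDIT1 and is an assumption):

# Sketch: bin the modelled flux onto the same HEALPix grid as the measured data
lonAll_mod2 = np.where(lonAll_mod >= 180.0, lonAll_mod - 360.0, lonAll_mod)  # wrap to [-180, 180)
pix_mod = hp.ang2pix(nside, np.radians(90.0 - latAll_mod), np.radians(lonAll_mod2))

sum_mod = np.zeros(npix)
count_mod = np.zeros(npix)
np.add.at(sum_mod, pix_mod, rateAll_mod)   # accumulate flux per pixel
np.add.at(count_mod, pix_mod, 1)           # number of samples per pixel

mean_mod = np.where(count_mod > 0, sum_mod / count_mod, 0)
log_mymap_mod = np.log10(mean_mod)         # pixels without samples become -inf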
EDIT1: This is what I tried.
For the measured data I have longitude (0 to 360 degrees), latitude and counts per second. From these I built a logarithmic HEALPix map, first transforming the longitude to run from -180 to 180 degrees. The data from the model (marked with '_mod') also come with longitude from 0 to 360, so I applied the same transformation and then tried several ways to plot them as contours on top of my measured map, but nothing worked for me (see also the alternative sketch after the code below).
import numpy as np
import healpy as hp
import pandas as pd
import matplotlib.pyplot as plt
from os.path import exists
from scipy.interpolate import griddata

# Wrap measured longitudes from [0, 360) to [-180, 180)
lonAll2 = np.zeros_like(lonAll)
for i in range(len(lonAll)):
    if lonAll[i] >= 180.0:
        lonAll2[i] = lonAll[i] - 360.0
    else:
        lonAll2[i] = lonAll[i]

# Colatitude and longitude in radians (not used further below; ang2pix is called with the angles directly)
theta = (90.0 - latAll) * (np.pi / 180.0)
phi = lonAll2 * (np.pi / 180.0)

# HEALPix resolution
k = 4
nside = 2 ** k
npix = hp.nside2npix(nside)

# Accumulate the mean count rate per pixel
healpix_sum = np.zeros(npix)
healpix_count = np.zeros(npix)
for i in range(len(lonAll)):
    lon = lonAll2[i]
    lat = latAll[i]
    pixel_idx = hp.ang2pix(nside, np.radians(90 - lat), np.radians(lon))
    count_rate = rateAll[i]
    healpix_sum[pixel_idx] += count_rate
    if count_rate > 0:
        healpix_count[pixel_idx] += 1

mask = healpix_count > 0
healpix_mean = np.where(mask, healpix_sum / healpix_count, 0)
log_mymap = np.log10(healpix_mean)  # pixels with zero mean become -inf
# Read the modelled data (marked with '_mod')
data_mod = ['a.txt']
lonAll_mod = np.array([])
latAll_mod = np.array([])
rateAll_mod = np.array([])
for filename_mod in data_mod:
    if exists(filename_mod):
        df = pd.read_table(filename_mod, sep=',', skiprows=14)
        lonAll_mod = np.concatenate((lonAll_mod, df['lon(deg)'].values))
        latAll_mod = np.concatenate((latAll_mod, df['lat(deg)'].values))
        rateAll_mod = np.concatenate((rateAll_mod, df['flux'].values))
    else:
        print("File not found:", filename_mod)
# Grayscale base map of the measured data
cmap_custom2 = plt.cm.get_cmap('Greys')
MyMap = hp.newvisufunc.projview(
    log_mymap,
    title='VZLU_rch2_k4_mean_geq0_contour',
    coord=["G"],
    graticule=True,
    graticule_labels=True,
    flip='geo',
    cmap=cmap_custom2,
    projection_type='hammer',
    phi_convention="symmetrical",
    cbar=False,
)
ax = plt.gca()

# Wrap modelled longitudes from [0, 360) to [-180, 180), same as for the measured data
lonAll_mod2 = np.zeros_like(lonAll_mod)
for i in range(len(lonAll_mod)):
    if lonAll_mod[i] >= 180.0:
        lonAll_mod2[i] = lonAll_mod[i] - 360.0
    else:
        lonAll_mod2[i] = lonAll_mod[i]

# Interpolate the scattered modelled fluxes onto a regular lon/lat grid
lon_grid, lat_grid = np.meshgrid(np.linspace(-180, 180, int(np.sqrt(rateAll_mod.size))),
                                 np.linspace(-90, 90, int(np.sqrt(rateAll_mod.size))))
rateAll_mod_grid = griddata((lonAll_mod2, latAll_mod), rateAll_mod, (lon_grid, lat_grid), method='cubic')

# Snap the grid points to HEALPix pixel centres (pix2ang with lonlat=True returns lon/lat in degrees)
# and try to draw contours on the projview axes; this is the part that did not work for me
theta_grid, phi_grid = hp.pix2ang(nside=nside, ipix=hp.ang2pix(nside, lon_grid, lat_grid, lonlat=True), lonlat=True)
contour = ax.contour(theta_grid, phi_grid, rateAll_mod_grid, levels=5, colors='red')
plt.show()
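A possible alternative to the ax.contour attempt above: newer healpy versions let projview return the already-projected grid via return_only_data=True, and those arrays can then be passed to matplotlib's contour on the same projection axes (this is, I believe, roughly what the GitHub helper in EDIT2 does). A sketch, using log_mymap_mod as in EDIT2 and assuming your healpy supports return_only_data and that the flip / phi_convention settings behave consistently between the two calls, which is worth checking:

# Grayscale base map (kept on hold so the contours land on the same axes)
hp.newvisufunc.projview(
    log_mymap,
    coord=["G"],
    flip='geo',
    cmap=cmap_custom2,
    projection_type='hammer',
    phi_convention='symmetrical',
    cbar=False,
    hold=True,
)

# Ask projview for the already-projected modelled map instead of plotting it
lon_proj, lat_proj, grid_mod = hp.newvisufunc.projview(
    log_mymap_mod,
    coord=["G"],
    flip='geo',
    projection_type='hammer',
    phi_convention='symmetrical',
    return_only_data=True,
)

# Overlay contours on the current (Hammer) axes; the returned longitude/latitude are in radians
plt.contour(lon_proj, lat_proj, grid_mod, levels=5, colors='red')
plt.show()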