{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "## Polytrope EOS emcee workflow" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Import the libraries " ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "import pandas as pd\n", "import emcee\n", "import corner\n", "import matplotlib.pyplot as plt\n", "from scipy.interpolate import interp1d\n", "\n", "# Import your existing modules\n", "import InferenceWorkflow.prior as prior\n", "# import ultranest # Not used anymore\n", "# import TOVsolver.unit as unit\n", "# from TOVsolver.unit import g_cm_3, dyn_cm_2, km, Msun, e0\n", "import TOVsolver.maximum_central_density as maximum_central_density\n", "# from TOVsolver.maxium_central_density import maxium_central_density\n", "from TOVsolver.solver_code import solveTOV2\n", "import Polytrope_EOS as Polytrope" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Define the Constants" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "fm = 1\n", "hbarc = 0.197327053\n", "c = 1 # speed of light\n", "hbar = 1 # reduced Planck constant\n", "\n", "GeV = 1 / hbarc # Giga-electronvolt\n", "MeV = 1e-3 * GeV # Mega-electronvolt\n", "\n", "g = 5.625e26 * MeV # gram\n", "kg = 1e3 * g # kilogram\n", "cm = 1e13 * fm # centimeter\n", "m = 100 * cm # meter\n", "km = 1e5 * cm # kilometer\n", "s = 3e10 * cm # second\n", "\n", "dyn = g * cm / s**2 # dyne\n", "dyn_cm_2 = dyn / cm**2 # dyne / cm^2\n", "g_cm_3 = g / cm**3 # gram / cm^3\n", "erg = dyn * cm # ἐργον energy unit\n", "\n", "m_n = 939.565 * MeV # mass of neutron\n", "n0 = 0.16 / fm**3 # saturation density\n", "\n", "e0 = m_n * n0 # saturation energy density\n", "G = 6.6743e-8 * dyn * cm**2 / g**2 # gravitational constant\n", "Msun = 1.989e33 * g # mass of sun\n", "\n", "import numpy as np\n", "from scipy.interpolate import interp1d\n", "import TOVsolver.unit as unit\n", "# from TOVsolver.unit import unit\n", "from TOVsolver.main import OutputMR\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Defining the Maximum Central Density\n", "\n", "Outputs the maxium central density of a stable EoS\n", " Args:\n", " energy_density (numpy 1Darray): Density of EoS\n", " pressure (numpy 1Darray): Pressure of EoS\n", " central_densitys (numpy 1Darray): The range of central density\n", " num2 (int): The number of segments in the density interval of second search. 
{ "cell_type": "markdown", "metadata": {}, "source": [
"### Defining the Maximum Central Density\n",
"\n",
"Returns the maximum central density of a stable EoS.\n",
"\n",
"Args:\n",
"- `energy_density` (numpy 1D array): energy density of the EoS\n",
"- `pressure` (numpy 1D array): pressure of the EoS\n",
"- `central_densitys` (numpy 1D array): range of central densities to search\n",
"- `num2` (int): number of points in the refined density interval of the second search (at least 5)\n",
"\n",
"Returns:\n",
"- `density` (float): the maximum central density, in `unit.g_cm_3`" ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
"def maxium_central_density(energy_density, pressure, central_densitys=np.logspace(14.3, 15.6, 5) * unit.g_cm_3, num2=30):\n",
"    # Two-stage search for the maximum of the M(rho_c) curve\n",
"    # -------- First (coarse) search --------\n",
"    Ms = [-1, -2]  # placeholder values so the comparison 'Ms[-1] <= Ms[-2]' is valid from the start\n",
"    store_d_range = [central_densitys[-2], central_densitys[-1]]  # fallback range if the loop below finds no turning point\n",
"    # Locate the turning point of M(rho_c) within the given range of central densities\n",
"    for i, rho in enumerate(central_densitys):\n",
"        M, R = OutputMR('', energy_density, pressure, [rho])[0]\n",
"        Ms.append(M)\n",
"        if (i > 0) and (Ms[-1] <= Ms[-2]):\n",
"            store_d_range = [central_densitys[i-2], central_densitys[i]]\n",
"            break\n",
"    Ms_larg = Ms[-1]  # mass at the largest density reached in the first search\n",
"\n",
"    # -------- Second (refined) search --------\n",
"    Ms = [-1, -2]  # reinitialize\n",
"    # Refine the bracketed density range and return the central density at the turning point\n",
"    store_d_range = np.geomspace(store_d_range[0], store_d_range[1], num2)  # at least 5 points\n",
"    # The first and last points have already been computed, so skip them\n",
"    for i, rho in enumerate(store_d_range[1:-1]):\n",
"        M, R = OutputMR('', energy_density, pressure, [rho])[0]\n",
"        Ms.append(M)\n",
"        if Ms[-1] <= Ms[-2]:  # the refined grid rarely turns over at its first point,\n",
"                              # so the Ms = [-1, -2] initialization is safe\n",
"            return store_d_range[1:][i-1]\n",
"    # If no turning point is found in the refined grid, compare against the last point\n",
"    if Ms_larg < Ms[-1]:\n",
"        return store_d_range[-2]\n",
"    else:\n",
"        return store_d_range[-1]" ] },
{ "cell_type": "markdown", "metadata": {}, "source": [ "### Load the data" ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
"# Load the crust EoS table; columns 3 and 4 are energy density [g/cm^3] and pressure [dyn/cm^2]\n",
"Tolos_crust_out = np.loadtxt('', delimiter=None, comments='#', usecols=(0, 1, 2, 3, 4))  # fill in the path to the crust table\n",
"eps_crust = Tolos_crust_out[:, 3] * g_cm_3\n",
"pres_crust = Tolos_crust_out[:, 4] * dyn_cm_2\n",
"\n",
"rho_crust_end = eps_crust[-1]\n",
"P_crust_end = pres_crust[-1]\n",
"\n",
"# Core energy-density grid and the central-density grid used for the maximum-mass search\n",
"eps_core = np.logspace(11.7, 15.6, 1000, base=10) * g_cm_3\n",
"cent_densitys = np.logspace(14.3, 15.6, 30) * g_cm_3\n",
"\n",
"parameters = ['rho1', 'rho2', 'rho3', 'gamma1', 'gamma2', 'gamma3', 'gamma4', 'd1', 'd2', 'd3']\n",
"derived_param_names = ['d_max']\n",
"\n",
"gamma_lo_ranges = [1, 0.1, 0.1, 0.1]\n",
"gamma_up_ranges = [1.5, 4, 8, 4]" ] },
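{ "cell_type": "markdown", "metadata": {}, "source": [ "### Optional: inspect the crust EoS\n", "\n", "A quick look at the crust table that was just loaded, plotted in the same CGS units it was read in. This cell is purely illustrative and can be skipped." ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
"# Visualize the loaded crust EoS (converted back to CGS for plotting)\n",
"plt.figure()\n",
"plt.loglog(eps_crust / g_cm_3, pres_crust / dyn_cm_2)\n",
"plt.xlabel('energy density [g/cm^3]')\n",
"plt.ylabel('pressure [dyn/cm^2]')\n",
"plt.title('Crust EoS')\n",
"plt.show()" ] },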
{ "cell_type": "markdown", "metadata": {}, "source": [ "### Define the Prior" ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
"# Log-prior: map the sampled parameters through flat priors and enforce the EoS constraints\n",
"def log_prior(theta):\n",
"    para = np.zeros(len(theta))\n",
"\n",
"    try:\n",
"        # Assign the transition densities rho1 < rho2 < rho3\n",
"        para[0] = prior.flat_prior(0.04/0.16*e0, 0.065/0.16*e0, theta[0])\n",
"        para[1] = prior.flat_prior(1.5*e0, 8.3*e0, theta[1])\n",
"        para[2] = prior.flat_prior(para[1], 8.3*e0, theta[2])\n",
"\n",
"        rho_s = [para[0], para[1], para[2], eps_core[-1]]\n",
"        gamma_s = []\n",
"        k = 0\n",
"        pt = 0\n",
"\n",
"        # Assign the polytropic indices; each is capped by Polytrope.fun_gamma_max for its segment\n",
"        for i in range(4):\n",
"            if i == 0:\n",
"                gamma_max = Polytrope.fun_gamma_max(rho_s[i], rho_crust_end, P_crust_end)\n",
"            else:\n",
"                gamma_max = Polytrope.fun_gamma_max(rho_s[i], rho_s[i-1], pt)\n",
"            upper = min(gamma_up_ranges[i], gamma_max)\n",
"            if upper < gamma_lo_ranges[i]:\n",
"                return -np.inf\n",
"            para[3+i] = prior.flat_prior(gamma_lo_ranges[i], upper, theta[3+i])\n",
"\n",
"            gamma_s.append(para[3+i])\n",
"            if i == 0:\n",
"                k = P_crust_end / (rho_s[0]**gamma_s[-1])\n",
"            else:\n",
"                k = pt / (rho_s[i-1]**gamma_s[-1])\n",
"            pt = k * rho_s[i]**gamma_s[-1]\n",
"\n",
"        # Check the derived parameter: maximum stable central density of this EoS\n",
"        pres = Polytrope.compute_EOS(eps_core, para[3:7])[0]\n",
"        eps = np.hstack((eps_crust, eps_core))\n",
"        pres = np.hstack((pres_crust, pres))\n",
"\n",
"        d_max = maxium_central_density(eps, pres, cent_densitys)\n",
"        log_d_max = 14.3 + (np.log10(d_max / g_cm_3) - 14.3) * theta[7:]\n",
"        if not np.all((14.3 <= log_d_max) & (log_d_max <= np.log10(cent_densitys[-1]/g_cm_3))):\n",
"            return -np.inf\n",
"\n",
"        return 0.0\n",
"    except Exception:\n",
"        return -np.inf" ] },
{ "cell_type": "markdown", "metadata": {}, "source": [ "### Define the log likelihood and the log posterior" ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
"# Log-likelihood: compare the predicted (M, R) points with the Gaussian M-R measurements\n",
"def log_likelihood(theta):\n",
"    gammas = np.array([theta[3], theta[4], theta[5], theta[6]])\n",
"    rho_ts = np.array([theta[0], theta[1], theta[2]])\n",
"    d_s = 10 ** np.array(theta[7:]) * g_cm_3\n",
"\n",
"    # from_para_to_MR_points maps (EoS parameters, central densities) to (M, R) points; see the sketch below\n",
"    theta_full = np.append(gammas, rho_ts)\n",
"    Ms, Rs = from_para_to_MR_points(theta_full, d_s).T\n",
"\n",
"    # Gaussian likelihood for M and R, with steep penalties for unphysical values\n",
"    fm = np.where(Ms > 3 * Msun, -(Ms - 2 * Msun)**20, -(Ms - Mvalue)**2 / (2 * M_sigma**2))\n",
"    fr = np.where(Rs > 20 * km, -(Rs - 16 * km)**20, -(Rs - Rvalue)**2 / (2 * R_sigma**2))\n",
"\n",
"    f2 = (fm + fr).sum()\n",
"    likelihood = f1 + f2\n",
"    return likelihood\n",
"\n",
"# Log-posterior = log-prior + log-likelihood\n",
"def log_posterior(theta):\n",
"    lp = log_prior(theta)\n",
"    if not np.isfinite(lp):\n",
"        return -np.inf\n",
"    ll = log_likelihood(theta)\n",
"    return lp + ll\n" ] },
{ "cell_type": "markdown", "metadata": {}, "source": [ "### From the parameters to the Mass and Radius points" ] },
{ "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [
"# Observational M-R data: radius [km], mass [Msun] and their 1-sigma widths\n",
"# (needed by log_likelihood above: Rvalue, Mvalue, R_sigma, M_sigma, f1)\n",
"datas = [\n",
"    [10, 1.97, 0.5, 0.0985],\n",
"    [13, 1.44, 0.65, 0.072],\n",
"    [12, 2.07, 0.6, 0.1035]\n",
"]\n",
"datas = pd.DataFrame(datas, index=[1, 2, 3], columns=['R', 'M', 'Rsigma', 'Msigma'])\n",
"\n",
"Rvalue = datas['R'].values * km\n",
"Mvalue = datas['M'].values * Msun\n",
"\n",
"R_sigma = datas['Rsigma'].values * km\n",
"M_sigma = datas['Msigma'].values * Msun\n",
"\n",
"# Normalization of the Gaussian likelihood used in log_likelihood\n",
"f1 = np.log(1/(R_sigma*M_sigma*(np.sqrt(2*np.pi))**2)).sum()\n",
"\n",
"# Gaussian M-R likelihood for a single measurement (kept for reference)\n",
"def MR_likelihood_Gaussian(theta, x, d1):\n",
"    \"\"\"Compute the likelihood from an MR measurement with a Gaussian likelihood.\n",
"\n",
"    Args:\n",
"        theta: Parameter set for the equation of state (EoS).\n",
"        x: [Mvalue, Rvalue, Mwidth, Rwidth] - parameters of the Gaussian distribution.\n",
"        d1: Sampled logarithmic density (log10 scale) for the measurement.\n",
"\n",
"    Returns:\n",
"        likelihood (float): Log-likelihood value for the given parameter setup.\n",
"    \"\"\"\n",
"    Mvalue, Rvalue, Mwidth, Rwidth = x\n",
"    sigma_M = Mwidth\n",
"    sigma_R = Rwidth\n",
"\n",
"    # Compute pressure and energy density from the EoS using theta\n",
"    pres_core = Polytrope.compute_EOS(eps_core, theta)[0]\n",
"    eps_total = np.hstack((eps_crust, eps_core))\n",
"    pres_total = np.hstack((pres_crust, pres_core))\n",
"\n",
"    # Keep only unique pressure values (indices sorted by position) so the inverse interpolation is well defined\n",
"    unique_pressure_indices = np.sort(np.unique(pres_total, return_index=True)[1])\n",
"\n",
"    # Create interpolating functions for the EoS and its inverse\n",
"    eos = interp1d(eps_total, pres_total, kind='cubic', fill_value='extrapolate')\n",
"    inveos = interp1d(pres_total[unique_pressure_indices], eps_total[unique_pressure_indices], kind='cubic', fill_value='extrapolate')\n",
"\n",
"    if d1 == 0:\n",
"        return -1e101  # Return a large negative value if the density is zero\n",
"    else:\n",
"        density = 10**d1  # Convert log density to linear scale\n",
"        try:\n",
"            # Solve the TOV equations to get mass and radius for the given central density\n",
"            MR = solveTOV2(density, pres_total[20], eos, inveos)  # Returns [mass, radius]\n",
"\n",
"            if MR[0] == 0 or MR[1] == 0:\n",
"                return -1e101  # Return a large negative value if the mass or radius is zero\n",
"            else:\n",
"                # Compute the Gaussian likelihood\n",
"                fx = (1 / (sigma_M * sigma_R * (np.sqrt(2 * np.pi)) ** 2)) * np.exp(\n",
"                    -((MR[1] / km - Rvalue) ** 2) / (2 * sigma_R ** 2)\n",
"                    - ((MR[0] / Msun - Mvalue) ** 2) / (2 * sigma_M ** 2)\n",
"                )\n",
"                likelihood = np.log(fx)\n",
"        except OverflowError:\n",
"            return -1e101  # Return a large negative value if an overflow occurs\n",
"\n",
"        return likelihood if likelihood > -1e101 else -1e101\n" ] },
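{ "cell_type": "markdown", "metadata": {}, "source": [ "### A sketch of from_para_to_MR_points\n", "\n", "`log_likelihood` above calls `from_para_to_MR_points`, which is not defined elsewhere in this notebook. The cell below is a minimal sketch of what such a helper could look like, assuming that `Polytrope.compute_EOS` accepts the full parameter vector (four gammas followed by the three transition densities, as assembled in `log_likelihood`) and that `solveTOV2` returns `[mass, radius]` in code units for a given central density. Adapt it to your actual implementation." ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
"# Minimal sketch (see the note above): map EoS parameters and sampled central\n",
"# densities to an array of (M, R) points, one row per central density.\n",
"def from_para_to_MR_points(theta, d_s):\n",
"    # Build the full EoS for this parameter set: crust table + polytropic core\n",
"    pres_core = Polytrope.compute_EOS(eps_core, theta)[0]\n",
"    eps_total = np.hstack((eps_crust, eps_core))\n",
"    pres_total = np.hstack((pres_crust, pres_core))\n",
"\n",
"    # Interpolators for the EoS and its inverse, as in MR_likelihood_Gaussian\n",
"    eos = interp1d(eps_total, pres_total, kind='cubic', fill_value='extrapolate')\n",
"    inveos = interp1d(pres_total, eps_total, kind='cubic', fill_value='extrapolate')\n",
"\n",
"    MR_points = []\n",
"    for d in d_s:\n",
"        try:\n",
"            M, R = solveTOV2(d, pres_total[20], eos, inveos)\n",
"        except Exception:\n",
"            M, R = 0.0, 0.0  # flag a failed TOV integration\n",
"        MR_points.append([M, R])\n",
"    return np.array(MR_points)" ] },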
{ "cell_type": "markdown", "metadata": {}, "source": [ "### Run emcee for the MCMC Analysis" ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
"# Initialize emcee\n",
"ndim = len(parameters)  # 10\n",
"nwalkers = 2 * ndim  # 20\n",
"nsteps = 25000  # Adjust based on convergence\n",
"\n",
"# Initialize the walkers\n",
"initial_positions = []\n",
"for i in range(nwalkers):\n",
"    initial_position = [\n",
"        prior.flat_prior(0.04/0.16*e0, 0.065/0.16*e0, np.random.uniform(0, 1)),\n",
"        prior.flat_prior(1.5*e0, 8.3*e0, np.random.uniform(0, 1)),\n",
"        prior.flat_prior(1.5*e0, 8.3*e0, np.random.uniform(0, 1)),\n",
"        prior.flat_prior(gamma_lo_ranges[0], gamma_up_ranges[0], np.random.uniform(0, 1)),\n",
"        prior.flat_prior(gamma_lo_ranges[1], gamma_up_ranges[1], np.random.uniform(0, 1)),\n",
"        prior.flat_prior(gamma_lo_ranges[2], gamma_up_ranges[2], np.random.uniform(0, 1)),\n",
"        prior.flat_prior(gamma_lo_ranges[3], gamma_up_ranges[3], np.random.uniform(0, 1)),\n",
"        np.random.uniform(0, 1),\n",
"        np.random.uniform(0, 1),\n",
"        np.random.uniform(0, 1)\n",
"    ]\n",
"    initial_positions.append(initial_position)\n",
"\n",
"initial_positions = np.array(initial_positions)\n",
"\n",
"# Create the sampler\n",
"sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posterior)\n",
"\n",
"# Run the sampler\n",
"print(\"Running MCMC...\")\n",
"sampler.run_mcmc(initial_positions, nsteps, progress=True)\n",
"print(\"MCMC completed.\")\n",
"\n",
"# Flatten the chain\n",
"flat_samples = sampler.get_chain(discard=1000, thin=15, flat=True)\n",
"\n",
"# Save the samples\n",
"np.savetxt(\"emcee_samples.txt\", flat_samples)" ] },
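{ "cell_type": "markdown", "metadata": {}, "source": [ "### Optional: check convergence\n", "\n", "An illustrative convergence diagnostic based on emcee's integrated autocorrelation time. As a rule of thumb the chain should be at least ~50 autocorrelation times long; `get_autocorr_time` raises an `AutocorrError` if the chain is too short to give a reliable estimate." ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
"# Illustrative convergence check: estimate the integrated autocorrelation time per parameter\n",
"try:\n",
"    tau = sampler.get_autocorr_time()\n",
"    print('Integrated autocorrelation times:', tau)\n",
"    print('nsteps / max(tau) =', nsteps / np.max(tau), '  # aim for >~ 50')\n",
"except emcee.autocorr.AutocorrError as err:\n",
"    print('Chain too short for a reliable autocorrelation estimate:', err)" ] },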
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
"# Plot corner plot\n",
"fig = corner.corner(flat_samples, labels=parameters, show_titles=True, title_fmt=\".2f\")\n",
"plt.show()" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.6" } }, "nbformat": 4, "nbformat_minor": 2 }