GalacticDNSMass


hypothesisA.py (5391B)


import pymultinest
import numpy as np
from scipy import stats
from scipy.special import erf
import os
import sys
from datetime import datetime

startTime = datetime.now()

###
# Directory of the current script.
# Required by Condor jobs to find the relevant samples etc.
topDirectory = os.path.dirname(os.path.realpath(__file__)) + '/'

# Load the BNS mass samples
# (e.g. a 17x10000 array).
pulsarSamples = np.loadtxt(topDirectory + 'Samples/mr_samples.txt')
companionSamples = np.loadtxt(topDirectory + 'Samples/ms_samples.txt')

# For each BNS, group the pulsar & companion samples into pairs.
# Creates an array of shape (17, 10000, 2).
bothMassSamples = np.stack((pulsarSamples, companionSamples), axis=-1)

# Only use the first 1000 samples per system, to increase speed.
massSamples = bothMassSamples[:, :1000, :]

# Define nMeasurements and nSamples per mass measurement.
nSamples = len(massSamples[0])
nMeasurements = len(massSamples)
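# For example (assuming the 17x10000 input described above): massSamples has
# shape (17, 1000, 2), so nMeasurements = 17 BNS systems and nSamples = 1000
# posterior samples per system.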


# Single Gaussian function, truncated to the range [0.8, 2] M_sun.
def evalSingleGaussian(theta, x):
    mu, sig = theta[0], theta[1]
    normalisingTerm = 0.5*(erf((2-mu)/(sig*2**0.5)) - erf((0.8-mu)/(sig*2**0.5)))
    return stats.norm(mu, sig).pdf(x) * 1.0/normalisingTerm

# Two-Gaussian mixture, with each component truncated to [0.8, 2] M_sun.
def evalTwoGaussian(theta, x):
    mu1, mu2, sig1, sig2, alpha = theta
    normalisingTerm1 = 0.5*(erf((2-mu1)/(sig1*2**0.5)) - erf((0.8-mu1)/(sig1*2**0.5)))
    normalisingTerm2 = 0.5*(erf((2-mu2)/(sig2*2**0.5)) - erf((0.8-mu2)/(sig2*2**0.5)))
    return alpha * stats.norm(mu1, sig1).pdf(x) * 1.0/normalisingTerm1 + (1-alpha) * stats.norm(mu2, sig2).pdf(x) * 1.0/normalisingTerm2

# Uniform function on [mMin, mMax].
def evalUniform(theta, x):
    mMin, mMax = theta[0], theta[1]
    return stats.uniform(mMin, mMax-mMin).pdf(x)
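# The Gaussian pdfs above are normalised over the truncation interval [0.8, 2] M_sun:
#     pdf(x | theta) = N(x; mu, sigma) / (Phi((2-mu)/sigma) - Phi((0.8-mu)/sigma)),
# where Phi is the standard normal CDF, written via erf above. A quick sanity
# check (hypothetical values, not part of the analysis) that the truncated pdf
# integrates to one over [0.8, 2]:
#
#     from scipy.integrate import quad
#     area, _ = quad(lambda m: evalSingleGaussian([1.4, 0.2], m), 0.8, 2)
#     assert abs(area - 1) < 1e-6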


### Model Information Dictionaries
singleGaussianModel = {
    "name": "singleGaussian",
    "pdf": evalSingleGaussian,
    "ndim": 2,
    "params": ['mu', 'sigma']
}

twoGaussianModel = {
    "name": "twoGaussian",
    "pdf": evalTwoGaussian,
    "ndim": 5,
    "params": ['mu1', 'mu2', 'sigma1', 'sigma2', 'alpha']
}

uniformModel = {
    "name": "uniform",
    "pdf": evalUniform,
    "ndim": 2,
    "params": ['mMin', 'mMax']
}


### Prior for each model.
def prior(cube, ndim, nparams):
    # cube arrives as a unit hypercube, which is mapped onto the relevant prior space.

    if modelName == 'singleGaussian':
        cube[0] = 0.8 + cube[0] * (2 - 0.8)          # mu ~ U(0.8, 2)
        cube[1] = 0.005 + cube[1] * (0.5 - 0.005)    # sigma ~ U(0.005, 0.5)
    if modelName == 'twoGaussian':
        cube[0] = 0.8 + cube[0] * (2 - 0.8)          # mu1 ~ U(0.8, 2)
        cube[1] = cube[0] + cube[1] * (2 - cube[0])  # mu2 ~ U(mu1, 2)
        cube[2] = 0.005 + cube[2] * (0.5 - 0.005)    # sigma1 ~ U(0.005, 0.5)
        cube[3] = 0.005 + cube[3] * (0.5 - 0.005)    # sigma2 ~ U(0.005, 0.5)
        cube[4] = cube[4] * 1                        # alpha ~ U(0, 1)
    if modelName == 'uniform':
        cube[0] = 0.8 + cube[0] * (2 - 0.8)          # mMin ~ U(0.8, 2)
        cube[1] = cube[0] + cube[1] * (2 - cube[0])  # mMax ~ U(mMin, 2)

    return
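# For illustration (hypothetical draw): with the singleGaussian model, a unit-cube
# point cube = [0.5, 0.5] maps to mu = 0.8 + 0.5*(2 - 0.8) = 1.4 and
# sigma = 0.005 + 0.5*(0.5 - 0.005) = 0.2525. The conditional mappings above
# (cube[1] built from the already-mapped cube[0]) enforce mu2 >= mu1 and mMax >= mMin.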


### Likelihood Function (Same as Farr et al.)
def likelihood(cube, ndim, nparams):

    # Create a list of the parameters for the model.
    paramList = [cube[i] for i in range(ndim)]

    # List to contain, for each BNS system, the sum over samples of the product
    # of the probabilities of its m_r and m_s samples under the model.
    pdfProductSumList = []

    # For the m_r and m_s pairs in each BNS system (e.g. 1000x2).
    for massSample in massSamples:

        # Evaluate the model pdf along the m_r and m_s samples of the BNS.
        mrProbabilities = modelEval(paramList, massSample[:, 0])
        msProbabilities = modelEval(paramList, massSample[:, 1])

        # Evaluate the product of the m_r and m_s probabilities for each pair.
        probabilityProduct = mrProbabilities * msProbabilities

        # Append the sum over all the probability products of each pair.
        pdfProductSumList.append(np.sum(probabilityProduct))

    # If, for any system, every sample pair falls completely outside the model,
    # return a log-likelihood of -inf.
    if 0 in pdfProductSumList:
        print("Zero probability value - Parameters: {}".format(paramList))
        return -np.inf

    # The log-likelihood is the sum over systems of the log of each
    # (1/nSamples)-normalised pdfProductSum.
    loglikelihood = nMeasurements * np.log(1.0/nSamples) + np.sum(np.log(pdfProductSumList))
    return loglikelihood
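# In equation form, with S = nSamples and theta the model parameters, this computes
#     log L(theta) = sum_i log[ (1/S) * sum_j f(m_r_ij | theta) * f(m_s_ij | theta) ],
# i.e. the product over systems of Monte Carlo averages over each system's
# posterior samples, rearranged so the (1/S)^nMeasurements factor is pulled
# out of the logs.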


### Models
# This list contains the set of all model dictionaries:
# model names, pdf functions, nDimensions, parameter names.
modelSet = [singleGaussianModel, twoGaussianModel, uniformModel]


### System input (model choice)
# Takes the given system argument.
# e.g. `hypothesisA.py 2` will select the model with index 1, twoGaussian.
modelSetIndex = int(sys.argv[1]) - 1

# Set the relevant variables to be used in this sampling.
modelDictionary = modelSet[modelSetIndex]
modelName, modelEval, ndim, paramNames = modelDictionary['name'], modelDictionary['pdf'], modelDictionary['ndim'], modelDictionary['params']


### Inference
# Directory to send output to. Create it if it does not exist.
directoryName = topDirectory + 'hypA_out/' + modelName[:4]
if not os.path.exists(directoryName):
    os.makedirs(directoryName)

# Run pymultinest sampling.
pymultinest.run(likelihood, prior, ndim, n_live_points=1000, sampling_efficiency=0.3, importance_nested_sampling=False, outputfiles_basename=directoryName + '/', resume=False, verbose=True)
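# A minimal sketch of reading back the results afterwards (not run here; this
# assumes the standard pymultinest.Analyzer interface and a completed run):
#
#     analyzer = pymultinest.Analyzer(n_params=ndim, outputfiles_basename=directoryName + '/')
#     stats = analyzer.get_stats()
#     print("log-evidence: {} +/- {}".format(stats['global evidence'], stats['global evidence error']))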

print("Code took {} seconds to run.".format(datetime.now() - startTime))
print("Exiting!")