diff --git a/ddToolbox/Data/Data.m b/ddToolbox/Data/Data.m
index 0dcc4644..b8c8729f 100644
--- a/ddToolbox/Data/Data.m
+++ b/ddToolbox/Data/Data.m
@@ -32,6 +32,7 @@
 p.addParameter('files',[],@(x) iscellstr(x)|ischar(x));
 p.parse(dataFolder, varargin{:});
+ assert(~isempty(p.Results.files), 'no filenames provided under ''files'' input argument')
 try
 table();
 catch
diff --git a/ddToolbox/models/Model.m b/ddToolbox/models/Model.m
index 360681ae..10416a8a 100644
--- a/ddToolbox/models/Model.m
+++ b/ddToolbox/models/Model.m
@@ -92,6 +92,7 @@
 obj = obj.postSamplingActivities();
 end
+
 function obj = postSamplingActivities(obj)
 
 %% Post-sampling activities (for model sub-classes) -----------
@@ -101,26 +102,39 @@
 obj = obj.calcDerivedMeasures();
 
 %% Post-sampling activities (common to all models) ------------
+ % posterior prediction calculation
 obj.postPred = PosteriorPrediction(obj.coda, obj.data, obj.observedData);
 
+ % calc and export convergence summary and parameter estimates
+ obj.export();
- % TODO: This should be a method of CODA
- convergenceSummary(obj.coda.getStats('Rhat',[]), obj.plotOptions.savePath, obj.data.getIDnames('all'))
-
- exporter = ResultsExporter(obj.coda, obj.data, obj.postPred.postPred, obj.varList, obj.plotOptions);
- exporter.printToScreen();
- exporter.export(obj.plotOptions.savePath, obj.plotOptions.pointEstimateType);
- % TODO ^^^^ avoid this duplicate use of pointEstimateType
-
+ % plot or not
 if ~strcmp(obj.plotOptions.shouldPlot,'no')
 % TODO: Allow public calls of obj.plot to specify options.
 % At the moment the options need to be provided on Model
 % object construction
 obj.plot()
 end
-
+
 obj.tellUserAboutPublicMethods()
 end
+
+ function export(obj)
+ % TODO: This should be a method of CODA
+ % TODO: better function name. What does it do? Calculate or
+ % export, or both?
+ convergenceSummary(obj.coda.getStats('Rhat',[]),...
+ obj.plotOptions.savePath,...
+ obj.data.getIDnames('all'))
+
+ exporter = ResultsExporter(obj.coda, obj.data,...
+ obj.postPred.postPred,...
+ obj.varList,...
+ obj.plotOptions);
+ exporter.printToScreen();
+ exporter.export(obj.plotOptions.savePath, obj.plotOptions.pointEstimateType);
+ % TODO ^^^^ avoid this duplicate use of pointEstimateType
+ end
 
 %% Public MIDDLE-MAN METHODS
diff --git a/ddToolbox/models/nonparametric_models/DF_SLICE_PsychometricFunction.m b/ddToolbox/models/nonparametric_models/DF_SLICE_PsychometricFunction.m
index f30ff1aa..0fdbacb1 100644
--- a/ddToolbox/models/nonparametric_models/DF_SLICE_PsychometricFunction.m
+++ b/ddToolbox/models/nonparametric_models/DF_SLICE_PsychometricFunction.m
@@ -29,8 +29,9 @@ function plot(obj)
 %
-x = [-2:0.01:2];
-
+%x = [-2:0.01:2];
+x = [0:0.01:2];
+
 try
 plot(x, obj.eval(x, 'nExamples', 100), '-', 'Color',[0.5 0.5 0.5 0.1])
 catch
diff --git a/ddToolbox/models/nonparametric_models/separateNonParametric.jags b/ddToolbox/models/nonparametric_models/separateNonParametric.jags
index 83365149..f25f900c 100644
--- a/ddToolbox/models/nonparametric_models/separateNonParametric.jags
+++ b/ddToolbox/models/nonparametric_models/separateNonParametric.jags
@@ -21,22 +21,41 @@ model{
 
 groupALPHAmu <- 0 # TODO: NEEDS ATTENTION
-groupALPHAsigma <-0.01 # TODO: NEEDS ATTENTION
+groupALPHAsigma <-0.1 # TODO: NEEDS ATTENTION
 
 epsilon_alpha <- 1+1
-epsilon_beta <- 1+100
+epsilon_beta <- 1+200
+
+#SIGMA ~ dgamma(1,1)
+SIGMA <- 0.05 # priors
+
+# The prior standard deviation of the indifference point (Rstar) is SIGMA multiplied by the time elapsed since the previous delay, so the precision is 1/((SIGMA*gap)^2). This is how our uncertainty increases over time.
+prec[1] <- 1/((SIGMA * uniqueDelays[1])^2)
+for (d in 2:length(uniqueDelays)) {
+ prec[d] <- 1/((SIGMA * (uniqueDelays[d]-uniqueDelays[d-1]))^2)
+}
+
 for (p in participantIndexList){
 epsilon[p] ~ dbeta(epsilon_alpha , epsilon_beta ) T(,0.5)
+
+ # note: the non-centred reparameterisation is not used for alpha here, because it causes an error in this model
 alpha[p] ~ dnorm(groupALPHAmu, 1/(groupALPHAsigma^2)) T(0,)
 
 # PRIOR OVER DISCOUNT FRACTION
- # prior over A/B should be centered on 1 if we are expecting discounting and anti-discounting, and can be truncates
- # prior over A-B should be centered on 0 if we are expecting discounting and anti-discounting, and can be truncates
- for (d in 1:length(uniqueDelays)) {
- Rstar[p,d] ~ dnorm(1, 1/ 1^2) T(0,)
+ # assume indifference point (RStar) at the first delay (after delay=0) is centered on 1
+ mu[p,1] <- 1
+ # assume indifference points at all subsequent delays are centered on the previous indifference point
+ for (d in 2:length(uniqueDelays)) {
+ mu[p,d] <- mu[p,d-1]
 }
+
+ for (d in 1:length(uniqueDelays)) {
+ Rstar[p,d] ~ dnorm(mu[p,d], prec[d]) T(0,)
+ #Rstar[p,d] ~ dt(mu[p,d], prec[d], 1) T(0,) # equals Cauchy when k=1
+ }
+
 }
 
 for (t in 1:length(ID)) {
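A note on the new Rstar prior in separateNonParametric.jags: each indifference point is drawn around mu (initialised at 1 and carried forward) with a standard deviation of SIGMA times the gap since the previous delay, so the prior gets wider for delays that are spaced further apart. The MATLAB sketch below just simulates draws from that prior to visualise the widening. It is illustrative only: SIGMA matches the value in the model, but the delay vector and number of draws are made up, and the T(0,) truncation is ignored.

% Sketch only: simulate draws from the Rstar prior defined above.
SIGMA        = 0.05;                     % same fixed value as in the model
uniqueDelays = [7 14 30 90 180 365];     % hypothetical delays (days)
nDraws       = 1000;

sd    = SIGMA .* [uniqueDelays(1), diff(uniqueDelays)];  % sd grows with the gap between delays
mu    = ones(1, numel(uniqueDelays));                    % mu is carried forward as 1
Rstar = mu + randn(nDraws, numel(uniqueDelays)) .* sd;   % truncation at zero omitted here

plot(uniqueDelays, Rstar', '-', 'Color', [0.5 0.5 0.5 0.05])
xlabel('delay'), ylabel('R^* (indifference point)')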
diff --git a/ddToolbox/models/parametric_models/bens_new_model/ModelSeparateExpPower.m b/ddToolbox/models/parametric_models/bens_new_model/ModelSeparateExpPower.m
index c6489721..69e9390e 100644
--- a/ddToolbox/models/parametric_models/bens_new_model/ModelSeparateExpPower.m
+++ b/ddToolbox/models/parametric_models/bens_new_model/ModelSeparateExpPower.m
@@ -20,8 +20,8 @@
 % Generate initial values of the root nodes
 nExperimentFiles = obj.data.getNExperimentFiles();
 for chain = 1:nchains
- initialParams(chain).k = unifrnd(0.01, 0.5, [nExperimentFiles,1]);
- initialParams(chain).tau = unifrnd(0.01, 2, [nExperimentFiles,1]);
+ initialParams(chain).k = unifrnd(-1, 1, [nExperimentFiles,1]);
+ initialParams(chain).tau = unifrnd(0.01, 1.2, [nExperimentFiles,1]);
 initialParams(chain).epsilon = 0.01 + rand([nExperimentFiles,1])./10;
 initialParams(chain).alpha = abs(normrnd(0.01,5,[nExperimentFiles,1]));
 end
diff --git a/ddToolbox/models/parametric_models/bens_new_model/mixedExpPower.jags b/ddToolbox/models/parametric_models/bens_new_model/mixedExpPower.jags
index d9e3ef76..420d8639 100644
--- a/ddToolbox/models/parametric_models/bens_new_model/mixedExpPower.jags
+++ b/ddToolbox/models/parametric_models/bens_new_model/mixedExpPower.jags
@@ -33,6 +33,7 @@ for (t in 1:length(ID)) {
 # NOTE: because we have 2 discounting parameters, I've found that more restrictive priors need to be placed on the error parameters
 alpha_mu ~ dnorm(0,1/(10^2)) T(0,)
 alpha_precision ~ dgamma(0.01,0.01)
+alpha_sigma <- sqrt(1/alpha_precision)
 
 # error rates (epsilon) hyperprior
 groupW ~ dbeta(1.1, 10.9) # mode for lapse rate
@@ -43,8 +44,11 @@
 epsilon_alpha <- groupW*(groupK-2)+1
 epsilon_beta <- (1-groupW)*(groupK-2)+1
 
 for (p in 1:(nRealExperimentFiles+1)){ # +1 for unobserved participant
- alpha[p] ~ dnorm(alpha_mu, alpha_precision) T(0,)
 epsilon[p] ~ dbeta(epsilon_alpha , epsilon_beta ) T(,0.5)
+
+ # using reparameterisation to avoid funnel of hell
+ alpha_offset[p] ~ dnorm(0,1) T(0,)
+ alpha[p] <- alpha_mu + alpha_offset[p] * alpha_sigma
 }
diff --git a/ddToolbox/models/parametric_models/bens_new_model/separateExpPower.jags b/ddToolbox/models/parametric_models/bens_new_model/separateExpPower.jags
index 758319e6..4be81df1 100644
--- a/ddToolbox/models/parametric_models/bens_new_model/separateExpPower.jags
+++ b/ddToolbox/models/parametric_models/bens_new_model/separateExpPower.jags
@@ -11,15 +11,16 @@ model{
 
 # RANDOM (BY PARTICIPANT) FACTORS; HYPER-PRIORS = NO
 K_MEAN <- 0 # <---- currently guesstimating
-K_PRECISION <- 1/(0.1^2) #sigma=0.1 # <---- currently guesstimating
+K_PRECISION ~ dgamma(0.01, 0.01) # <---- currently guesstimating
 
 for (p in 1:nRealExperimentFiles){ # no +1 because no shrinkage hyperprior
- k[p] ~ dnorm(K_MEAN, K_PRECISION)
- tau[p] ~ dexp(1)
+ k[p] ~ dt(K_MEAN, K_PRECISION, 1)
+ tau[p] ~ dnorm(1, 1/0.2^2) T(0.0001, 1.5) #<------ check these truncation bounds
 }
 
 # MODEL-SPECIFIC: CALCULATION OF PRESENT SUBJECTIVE VALUES
 for (t in 1:length(ID)) {
+ #VA[t] <- A[t] # assuming DA=0
 VA[t] <- A[t] * (exp( -k[ID[t]] * (DA[t]^tau[ID[t]]) ) )
 VB[t] <- B[t] * (exp( -k[ID[t]] * (DB[t]^tau[ID[t]]) ) )
 }
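For readers unfamiliar with the exponential-power ("ExpPower") model being tuned above, the subjective value calculation in separateExpPower.jags discounts by exp(-k * delay^tau), which reduces to ordinary exponential discounting when tau = 1 (roughly where the new tau prior is centred). A quick MATLAB sketch of the discount fraction, with purely illustrative parameter values:

% Sketch only: the exp(-k * delay^tau) discount fraction for a few tau values.
delays = 0:365;
k      = 0.01;
hold on
for tau = [0.6 1.0 1.4]
    plot(delays, exp(-k .* delays.^tau))
end
hold off
xlabel('delay'), ylabel('discount fraction')
legend('\tau = 0.6', '\tau = 1 (exponential)', '\tau = 1.4')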
diff --git a/ddToolbox/models/parametric_models/exponential_models/hierarchicalExp1.jags b/ddToolbox/models/parametric_models/exponential_models/hierarchicalExp1.jags
index 9431775f..6ee27d23 100644
--- a/ddToolbox/models/parametric_models/exponential_models/hierarchicalExp1.jags
+++ b/ddToolbox/models/parametric_models/exponential_models/hierarchicalExp1.jags
@@ -12,9 +12,12 @@ model{
 
 K_MEAN ~ dnorm(0.01, 1/(0.5^2))
 K_PRECISION ~ dgamma(0.001,0.001)
+K_SIGMA <- sqrt(1/K_PRECISION)
 
 for (p in 1:(nRealExperimentFiles+1)){ # +1 for unobserved participant
- k[p] ~ dnorm(K_MEAN, K_PRECISION)
+ # using reparameterisation to avoid funnel of hell
+ k_offset[p] ~ dnorm(0,1)
+ k[p] <- K_MEAN + k_offset[p] * K_SIGMA
 }
 
 # MODEL-SPECIFIC: CALCULATION OF PRESENT SUBJECTIVE VALUES
@@ -39,8 +42,11 @@
 epsilon_alpha <- groupW*(groupK-2)+1
 epsilon_beta <- (1-groupW)*(groupK-2)+1
 
 for (p in 1:(nRealExperimentFiles+1)){ # +1 for unobserved participant
- alpha[p] ~ dnorm(groupALPHAmu, 1/(groupALPHAsigma^2)) T(0,)
 epsilon[p] ~ dbeta(epsilon_alpha , epsilon_beta ) T(,0.5)
+
+ # using reparameterisation to avoid funnel of hell
+ alpha_offset[p] ~ dnorm(0,1) T(0,)
+ alpha[p] <- groupALPHAmu + alpha_offset[p] * groupALPHAsigma
 }
diff --git a/ddToolbox/models/parametric_models/exponential_models/mixedExp1.jags b/ddToolbox/models/parametric_models/exponential_models/mixedExp1.jags
index 7016c828..a22d4fff 100644
--- a/ddToolbox/models/parametric_models/exponential_models/mixedExp1.jags
+++ b/ddToolbox/models/parametric_models/exponential_models/mixedExp1.jags
@@ -10,7 +10,7 @@ model{
 
 # DISCOUNT FUNCTION PARAMETERS =================================================
 # RANDOM (BY PARTICIPANT) FACTORS; HYPER-PRIORS = NO
-K_MEAN <- 0 # <---- currently guesstimating
+K_MEAN <- 0.01 # <---- currently guesstimating
 K_PRECISION <- 1/(0.5^2) # <---- currently guesstimating
 
 for (p in 1:nRealExperimentFiles){ # no +1 because no shrinkage hyperprior
@@ -39,8 +39,11 @@
 epsilon_alpha <- groupW*(groupK-2)+1
 epsilon_beta <- (1-groupW)*(groupK-2)+1
 
 for (p in 1:(nRealExperimentFiles+1)){ # +1 for unobserved participant
- alpha[p] ~ dnorm(groupALPHAmu, 1/(groupALPHAsigma^2)) T(0,)
 epsilon[p] ~ dbeta(epsilon_alpha , epsilon_beta ) T(,0.5)
+
+ # using reparameterisation to avoid funnel of hell
+ alpha_offset[p] ~ dnorm(0,1) T(0,)
+ alpha[p] <- groupALPHAmu + alpha_offset[p] * groupALPHAsigma
 }
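The "reparameterisation to avoid funnel of hell" comments in these JAGS files refer to the non-centred (offset) parameterisation: rather than sampling each participant-level parameter directly from its group distribution, a standard-normal offset is sampled and then shifted and scaled by the group mean and standard deviation. The implied prior is the same; the benefit is purely computational, because the sampler no longer has to explore the narrow funnel that appears when the group standard deviation becomes small. A minimal MATLAB check of the equivalence, using illustrative values:

% Sketch only: centred and non-centred parameterisations give the same distribution.
K_MEAN  = 0.01;
K_SIGMA = 0.5;
N       = 1e5;

k_centred    = K_MEAN + K_SIGMA .* randn(N, 1);  % k ~ Normal(K_MEAN, K_SIGMA)
k_offset     = randn(N, 1);                      % k_offset ~ Normal(0, 1)
k_noncentred = K_MEAN + k_offset .* K_SIGMA;     % k <- mean + offset * sigma

histogram(k_centred, 'Normalization', 'pdf'); hold on
histogram(k_noncentred, 'Normalization', 'pdf'); hold off
legend('centred', 'non-centred')

One descriptive note: where the offset itself is truncated at zero (the alpha_offset ~ dnorm(0,1) T(0,) lines), the resulting prior on alpha is a half-normal sitting at or above the group mean, rather than a normal truncated at zero.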
diff --git a/ddToolbox/models/parametric_models/hyperbolic_magnitude_effect_models/hierarchicalME.jags b/ddToolbox/models/parametric_models/hyperbolic_magnitude_effect_models/hierarchicalME.jags
index 3813d833..e8792ec0 100644
--- a/ddToolbox/models/parametric_models/hyperbolic_magnitude_effect_models/hierarchicalME.jags
+++ b/ddToolbox/models/parametric_models/hyperbolic_magnitude_effect_models/hierarchicalME.jags
@@ -29,9 +29,12 @@
 groupCmu ~ dnorm(0, 1/(10000^2))
 groupCsigma ~ dunif(0, 10000)
 
 for (p in 1:(nRealExperimentFiles+1)){ # +1 for unobserved participant
- m[p] ~ dnorm(groupMmu, 1/(groupMsigma^2))
- c[p] ~ dnorm(groupCmu, 1/(groupCsigma^2))
-}
+ # using reparameterisation to avoid funnel of hell
+ m_offset[p] ~ dnorm(0,1)
+ m[p] <- groupMmu + m_offset[p] * groupMsigma
+ c_offset[p] ~ dnorm(0,1)
+ c[p] <- groupCmu + c_offset[p] * groupCsigma
+}
 
 # MODEL-SPECIFIC: CALCULATION OF PRESENT SUBJECTIVE VALUES
 for (t in 1:length(ID)) {
@@ -59,7 +62,10 @@
 epsilon_beta <- (1-groupW)*(groupK-2)+1
 
 for (p in 1:(nRealExperimentFiles+1)){ # +1 for unobserved participant
 epsilon[p] ~ dbeta(epsilon_alpha , epsilon_beta ) T(,0.5)
- alpha[p] ~ dnorm(groupALPHAmu, 1/(groupALPHAsigma^2)) T(0,)
+
+ # using reparameterisation to avoid funnel of hell
+ alpha_offset[p] ~ dnorm(0,1) T(0,)
+ alpha[p] <- groupALPHAmu + alpha_offset[p] * groupALPHAsigma
 }
 
 # MODEL IN-SPECIFIC CODE BELOW... SHOULD NOT CHANGE ACROSS MODELS ==============
diff --git a/ddToolbox/models/parametric_models/hyperbolic_magnitude_effect_models/hierarchicalMEupdated.jags b/ddToolbox/models/parametric_models/hyperbolic_magnitude_effect_models/hierarchicalMEupdated.jags
index 2e7d8c12..f48b5776 100644
--- a/ddToolbox/models/parametric_models/hyperbolic_magnitude_effect_models/hierarchicalMEupdated.jags
+++ b/ddToolbox/models/parametric_models/hyperbolic_magnitude_effect_models/hierarchicalMEupdated.jags
@@ -29,8 +29,11 @@
 groupCmu ~ dnorm(0, 1/(100^2)) ## UPDATED SINCE PAPER
 groupCsigma ~ dunif(0, 10) ## UPDATED SINCE PAPER
 
 for (p in 1:(nRealExperimentFiles+1)){ # +1 for unobserved participant
- m[p] ~ dnorm(groupMmu, 1/(groupMsigma^2))
- c[p] ~ dnorm(groupCmu, 1/(groupCsigma^2))
+ # using reparameterisation to avoid funnel of hell
+ m_offset[p] ~ dnorm(0,1)
+ m[p] <- groupMmu + m_offset[p] * groupMsigma
+ c_offset[p] ~ dnorm(0,1)
+ c[p] <- groupCmu + c_offset[p] * groupCsigma
 }
 
 # MODEL-SPECIFIC: CALCULATION OF PRESENT SUBJECTIVE VALUES
@@ -59,7 +62,9 @@
 epsilon_beta <- (1-groupW)*(groupK-2)+1
 
 for (p in 1:(nRealExperimentFiles+1)){ # +1 for unobserved participant
 epsilon[p] ~ dbeta(epsilon_alpha , epsilon_beta ) T(,0.5)
- alpha[p] ~ dnorm(groupALPHAmu, 1/(groupALPHAsigma^2)) T(0,)
+ # using reparameterisation to avoid funnel of hell
+ alpha_offset[p] ~ dnorm(0,1) T(0,)
+ alpha[p] <- groupALPHAmu + alpha_offset[p] * groupALPHAsigma
 }
 
 # MODEL IN-SPECIFIC CODE BELOW... SHOULD NOT CHANGE ACROSS MODELS ==============
diff --git a/ddToolbox/models/parametric_models/hyperbolic_magnitude_effect_models/mixedME.jags b/ddToolbox/models/parametric_models/hyperbolic_magnitude_effect_models/mixedME.jags
index 23c8fc71..93129c1d 100644
--- a/ddToolbox/models/parametric_models/hyperbolic_magnitude_effect_models/mixedME.jags
+++ b/ddToolbox/models/parametric_models/hyperbolic_magnitude_effect_models/mixedME.jags
@@ -48,7 +48,10 @@
 epsilon_beta <- (1-groupW)*(groupK-2)+1
 
 for (p in 1:(nRealExperimentFiles+1)){ # +1 for unobserved participant
 epsilon[p] ~ dbeta(epsilon_alpha , epsilon_beta ) T(,0.5)
- alpha[p] ~ dnorm(groupALPHAmu, 1/(groupALPHAsigma^2)) T(0,)
+
+ # using reparameterisation to avoid funnel of hell
+ alpha_offset[p] ~ dnorm(0,1) T(0,)
+ alpha[p] <- groupALPHAmu + alpha_offset[p] * groupALPHAsigma
 }
diff --git a/ddToolbox/models/parametric_models/hyperbolic_magnitude_effect_models/separateME.jags b/ddToolbox/models/parametric_models/hyperbolic_magnitude_effect_models/separateME.jags
index 441e8e57..a1ef589d 100644
--- a/ddToolbox/models/parametric_models/hyperbolic_magnitude_effect_models/separateME.jags
+++ b/ddToolbox/models/parametric_models/hyperbolic_magnitude_effect_models/separateME.jags
@@ -7,8 +7,8 @@ model{
 
 # RANDOM (BY PARTICIPANT) FACTORS; HYPER-PRIORS = NO
 for (p in 1:nRealExperimentFiles){
- m[p] ~ dnorm(-0.243, 1/(100^2)) # dnorm(-0.243, 1/(0.072^2))
- c[p] ~ dnorm(0, 1/(1000^2)) # dnorm(0, 1/(1000^2))
+ m[p] ~ dnorm(-0.243, 1/(0.5^2)) # dnorm(-0.243, 1/(0.072^2))
+ c[p] ~ dnorm(0, 1/(10^2)) # dnorm(0, 1/(1000^2))
 }
 
 for (t in 1:length(ID)) {
@@ -22,8 +22,8 @@ for (t in 1:length(ID)) {
 }
 
 # RESPONSE ERROR PARAMETERS ====================================================
-epsilon_alpha <- 1.1
-epsilon_beta <- 10.9
+epsilon_alpha <- 1+1
+epsilon_beta <- 1+10
 for (p in 1:nRealExperimentFiles){
 alpha[p] ~ dexp(0.01)
 epsilon[p] ~ dbeta(epsilon_alpha , epsilon_beta ) T(,0.5)
diff --git a/ddToolbox/models/parametric_models/hyperbolic_magnitude_effect_models/separateME.stan b/ddToolbox/models/parametric_models/hyperbolic_magnitude_effect_models/separateME.stan
index d990145e..ef6a51ac 100644
--- a/ddToolbox/models/parametric_models/hyperbolic_magnitude_effect_models/separateME.stan
+++ b/ddToolbox/models/parametric_models/hyperbolic_magnitude_effect_models/separateME.stan
@@ -64,7 +64,7 @@ transformed parameters {
 model {
 m ~ normal(-0.243, 0.5);
 c ~ normal(0, 10);
- epsilon ~ beta(1.1, 10.9);
+ epsilon ~ beta(1+1, 1+10);
 alpha ~ exponential(0.01);
 
 R ~ bernoulli(P);
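On the lapse-rate prior changes in separateME.jags and separateME.stan above: writing the Beta parameters as 1+1 and 1+10 makes the pseudo-count reading explicit. Relative to a flat Beta(1,1) prior, it behaves as if 1 lapse and 10 non-lapses had already been observed, which puts the prior mode for the lapse rate at 1/11 (about 0.09) and the prior mean at 2/13 (about 0.15). A one-liner to eyeball it in MATLAB (betapdf needs the Statistics and Machine Learning Toolbox):

% Sketch only: visualise the Beta(1+1, 1+10) lapse-rate prior.
epsilon = linspace(0, 0.5, 200);
plot(epsilon, betapdf(epsilon, 1+1, 1+10))
xlabel('\epsilon (lapse rate)'), ylabel('prior density')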
diff --git a/ddToolbox/models/parametric_models/hyperbolic_models/hierarchicalLogK.jags b/ddToolbox/models/parametric_models/hyperbolic_models/hierarchicalLogK.jags
index 2d7b86a5..fe00cc1e 100644
--- a/ddToolbox/models/parametric_models/hyperbolic_models/hierarchicalLogK.jags
+++ b/ddToolbox/models/parametric_models/hyperbolic_models/hierarchicalLogK.jags
@@ -10,8 +10,9 @@
 groupLogKmu ~ dnorm(log(1/50),1/(2.5^2))
 groupLogKsigma ~ dexp(0.5)
 
 for (p in 1:(nRealExperimentFiles+1)){ # +1 for unobserved participant
- # small constant (0.0001) added below to avoid numerical problems.
- logk[p] ~ dnorm(groupLogKmu, 1/((groupLogKsigma+0.0001)^2))
+ # using reparameterisation to avoid funnel of hell
+ logk_offset[p] ~ dnorm(0,1)
+ logk[p] <- groupLogKmu + logk_offset[p] * groupLogKsigma
 }
 
 # MODEL-SPECIFIC: CALCULATION OF PRESENT SUBJECTIVE VALUES
@@ -36,7 +37,10 @@
 epsilon_beta <- (1-groupW)*(groupK-2)+1
 
 for (p in 1:(nRealExperimentFiles+1)){ # +1 for unobserved participant
 epsilon[p] ~ dbeta(epsilon_alpha , epsilon_beta ) T(,0.5)
- alpha[p] ~ dnorm(groupALPHAmu, 1/(groupALPHAsigma^2)) T(0,)
+
+ # using reparameterisation to avoid funnel of hell
+ alpha_offset[p] ~ dnorm(0,1) T(0,)
+ alpha[p] <- groupALPHAmu + alpha_offset[p] * groupALPHAsigma
 }
diff --git a/ddToolbox/models/parametric_models/hyperbolic_models/mixedLogK.jags b/ddToolbox/models/parametric_models/hyperbolic_models/mixedLogK.jags
index 76c5cbe6..7bf3c767 100644
--- a/ddToolbox/models/parametric_models/hyperbolic_models/mixedLogK.jags
+++ b/ddToolbox/models/parametric_models/hyperbolic_models/mixedLogK.jags
@@ -11,7 +11,9 @@
 groupLogKmu <- log(1/50)
 groupLogKsigma <- 2.5
 
 for (p in 1:nRealExperimentFiles){
- logk[p] ~ dnorm(groupLogKmu, 1/(groupLogKsigma^2))
+ # using reparameterisation to avoid funnel of hell
+ logk_offset[p] ~ dnorm(0,1)
+ logk[p] <- groupLogKmu + logk_offset[p] * groupLogKsigma
 }
 
@@ -30,7 +32,10 @@
 epsilon_beta <- (1-groupW)*(groupK-2)+1
 
 for (p in 1:(nRealExperimentFiles+1)){ # +1 for unobserved participant
 epsilon[p] ~ dbeta(epsilon_alpha , epsilon_beta ) T(,0.5)
- alpha[p] ~ dnorm(groupALPHAmu, 1/(groupALPHAsigma^2)) T(0,)
+
+ # using reparameterisation to avoid funnel of hell
+ alpha_offset[p] ~ dnorm(0,1) T(0,)
+ alpha[p] <- groupALPHAmu + alpha_offset[p] * groupALPHAsigma
 }
diff --git a/ddToolbox/sampleWithMatlabStan.m b/ddToolbox/sampleWithMatlabStan.m
index bcc9b3d1..9c8c2a67 100644
--- a/ddToolbox/sampleWithMatlabStan.m
+++ b/ddToolbox/sampleWithMatlabStan.m
@@ -1,5 +1,5 @@
 function codaObject = sampleWithMatlabStan(...
- modelFilename, observedData, mcmcparams, ~, ~)
+ modelFilename, observedData, mcmcparams, initialParameters, ~)
 
 assert(ischar(modelFilename))
 assert(isstruct(observedData))
@@ -17,11 +17,16 @@
 toc
 
 % ++++++++++++++++++++++++++++++++++++++++++++++++++
+% % convert initialParameters. % TODO: make this general
+% init.groupW = [initialParameters(:).groupW]';
+% init.groupALPHAmu = [initialParameters(:).groupALPHAmu]';
+% init.groupALPHAsigma = [initialParameters(:).groupALPHAsigma]';
+
 %% Get our sampler to sample
 display('SAMPLING STAN MODEL...')
 tic
 obj.stanFit = stan_model.sampling(...
- 'data', observedData,...
+ 'data', observedData,... %'init', init,...
 'warmup', mcmcparams.nburnin,... % warmup = burn-in
 'iter', ceil( mcmcparams.nsamples / mcmcparams.nchains),... % iter = number of MCMC samples
 'chains', mcmcparams.nchains,...
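The commented-out block in sampleWithMatlabStan.m converts the per-chain initialParameters struct array into a single init struct one hard-coded field at a time, and the TODO asks for a general version. One possible shape for that generalisation is sketched below; it simply loops over whatever fields are present and mirrors the stacked-and-transposed format implied by the commented-out lines. This is an untested assumption about the format MatlabStan expects for 'init', so it is offered as a sketch rather than a drop-in change.

% Sketch only: generalise the commented-out conversion above by looping over
% all fields of the per-chain initialParameters struct array. The resulting
% format (one stacked, transposed field per parameter) is assumed from the
% commented-out example lines and has not been tested against MatlabStan.
init = struct();
paramNames = fieldnames(initialParameters);
for i = 1:numel(paramNames)
    name = paramNames{i};
    init.(name) = [initialParameters(:).(name)]';
end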
diff --git a/ddToolbox/utils-plot/apply_plot_function_to_subplot_handle.m b/ddToolbox/utils-plot/apply_plot_function_to_subplot_handle.m
index b510dbaa..c21bbf39 100644
--- a/ddToolbox/utils-plot/apply_plot_function_to_subplot_handle.m
+++ b/ddToolbox/utils-plot/apply_plot_function_to_subplot_handle.m
@@ -1,9 +1,9 @@
-function apply_plot_function_to_subplot_handle(func, handle, plotdata)
+function apply_plot_function_to_subplot_handle(plotFunction, handle, plotdata)
 
-if ~isa(func,'function_handle')
+if ~isa(plotFunction,'function_handle')
 return
 end
 
 subplot(handle)
-func(plotdata);
-end
\ No newline at end of file
+plotFunction(plotdata);
+end
diff --git a/ddToolbox/utils-plot/autoXlim.m b/ddToolbox/utils-plot/autoXlim.m
index 57c8f116..aef15524 100644
--- a/ddToolbox/utils-plot/autoXlim.m
+++ b/ddToolbox/utils-plot/autoXlim.m
@@ -3,29 +3,29 @@ function autoXlim(samples, supp)
 % provided and +/- X std. But not outside of the support of the variable
 
 % double check samples is a vector
-samples=samples(:);
+samples = samples(:);
 
 M = median(samples);
 STD = std(samples);
-lower = M-STD*3;
-upper = M+STD*3;
+lower = M - STD * 3;
+upper = M + STD * 3;
 
-if isnumeric(supp)~=1
+if isnumeric(supp) ~= 1
 switch supp
 case{'positive'}
 supp=[0 inf];
 end
 end
 
-if lower<min(supp)
- lower=min(supp);
+if lower < min(supp)
+ lower = min(supp);
 end
 
-if upper>max(supp)
- upper=max(supp);
+if upper > max(supp)
+ upper = max(supp);
 end
 
 xlim([lower upper])
 
 drawnow
-return
\ No newline at end of file
+return
diff --git a/ddToolbox/utils-plot/clusterPlot.m b/ddToolbox/utils-plot/clusterPlot.m
index 03916e1e..2c540cd7 100644
--- a/ddToolbox/utils-plot/clusterPlot.m
+++ b/ddToolbox/utils-plot/clusterPlot.m
@@ -5,12 +5,12 @@ function clusterPlot(mcmcContainer, data, col, modelType, plotOptions, vars)
 
 varNames = {vars.name};
 
-if numel(varNames)==1
+if numel(varNames) == 1
 plot1Dclusters(mcmcContainer, data, col, modelType, plotOptions, vars);
-elseif numel(varNames)==2
+elseif numel(varNames) == 2
 plot2Dclusters(mcmcContainer, data, col, modelType, plotOptions, vars);
 else
 error('can only deal with plotting univariate or bivariate distributions')
 end
 
-end
\ No newline at end of file
+end
diff --git a/ddToolbox/utils-plot/forceNonExponentialTick.m b/ddToolbox/utils-plot/forceNonExponentialTick.m
index b0c72734..138a4288 100644
--- a/ddToolbox/utils-plot/forceNonExponentialTick.m
+++ b/ddToolbox/utils-plot/forceNonExponentialTick.m
@@ -1,4 +1,4 @@
 function forceNonExponentialTick
-set(gca,'xticklabel',num2str(get(gca,'xtick')'))
-set(gca,'yticklabel',num2str(get(gca,'ytick')'))
-return
\ No newline at end of file
+set(gca, 'xticklabel', num2str(get(gca,'xtick')'))
+set(gca, 'yticklabel', num2str(get(gca,'ytick')'))
+return
diff --git a/ddToolbox/utils-plot/holdDecorator.m b/ddToolbox/utils-plot/holdDecorator.m
new file mode 100644
index 00000000..b80dd1e6
--- /dev/null
+++ b/ddToolbox/utils-plot/holdDecorator.m
@@ -0,0 +1,27 @@
+function [outputs] = holdDecorator(plotFunction)
+% This is a decorator function which does some work before and after the
+% provided plotFunction is executed. In this case, we are setting the hold
+% state of a figure to be on, then returning it to its original state.
+%
+% We assume plotFunction() is self-contained, in that calling it will work
+% without any need for input arguments. So we may well have created a
+% partial function before, eg:
+% plotFunc = @() myPlottingFunction(x, y, options);
+% outputs = holdDecorator(plotFunc)
+
+% NOTE: here we do wrapping and execution. It is also possible to return a
+% function which is decorated.
+
+%% 1. before steps
+initial_hold_state = ishold(gca);
+hold on
+
+%% 2. call the function
+[outputs] = plotFunction();
+
+%% 3. after steps
+if initial_hold_state == 0
+ hold off
+end
+
+end
\ No newline at end of file
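Because holdDecorator only runs its wrapped code when it is handed a function handle, and expects that handle to take no arguments, callers should pass an anonymous function that closes over the data rather than the result of an already-executed plot call. A small usage sketch:

% Sketch only: wrap a self-contained plot call with holdDecorator so it is
% drawn with hold on, and the axes' original hold state is restored afterwards.
x = linspace(0, 2*pi, 100);
plotFunc = @() plot(x, sin(x), 'k-');   % zero-argument partial function
lineHandle = holdDecorator(plotFunc);   % the decorator manages the hold state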
diff --git a/ddToolbox/utils-plot/my_errorbarsUL.m b/ddToolbox/utils-plot/my_errorbarsUL.m
index 0c5ff553..b46e28e7 100644
--- a/ddToolbox/utils-plot/my_errorbarsUL.m
+++ b/ddToolbox/utils-plot/my_errorbarsUL.m
@@ -2,10 +2,10 @@
 % U = upper
 % L = lower
 
-l=zeros(numel(X),1);
+l = zeros(numel(X),1);
 
 for n=1:numel(X)
- lineHandles(n) = line([X(n) X(n)],[U(n) L(n)]);
+ lineHandles(n) = line([X(n) X(n)], [U(n) L(n)]);
 end
 
 %% Apply formatting
@@ -15,4 +15,4 @@
 % http://www.mathworks.co.uk/help/matlab/ref/patch_props.html
 for n=1:2:numel(opts)
 set(lineHandles, opts{n}, opts{n+1});
-end
\ No newline at end of file
+end
diff --git a/ddToolbox/utils-plot/my_shaded_errorbar_zone_UL.m b/ddToolbox/utils-plot/my_shaded_errorbar_zone_UL.m
index f7d34062..9d4a1506 100644
--- a/ddToolbox/utils-plot/my_shaded_errorbar_zone_UL.m
+++ b/ddToolbox/utils-plot/my_shaded_errorbar_zone_UL.m
@@ -11,28 +11,19 @@
 %
 % written by: Benjamin T Vincent
 
+h = holdDecorator( @() plotErrorBarZone(x, upper, lower, col) );
 
-g=ishold(gca);
-hold on
+end
 
-%%
+function h = plotErrorBarZone(x, upper, lower, col)
 % draw the shaded error bar zone
-x =[x,fliplr(x)];
-y =[upper,fliplr(lower)];
-h =patch(x,y,[0.8 0.8 0.8]);
-
-% move it to the back
-uistack(h,'bottom')
-
-% set the colour
+x = [x, fliplr(x)];
+y = [upper, fliplr(lower)];
+h = patch(x, y, [0.8 0.8 0.8]);
+% formatting
+uistack(h,'bottom')
 set(h,'EdgeColor','none')
 set(h,'FaceColor',col)
-
 set(h,'FaceAlpha',0.2)
-
-% leave the figure hold status as it was
-if g==0
- hold off
 end
-return
\ No newline at end of file
+end
diff --git a/ddToolbox/utils-plot/ribbon_plot.m b/ddToolbox/utils-plot/ribbon_plot.m
index 4772cbf8..b91ec76e 100644
--- a/ddToolbox/utils-plot/ribbon_plot.m
+++ b/ddToolbox/utils-plot/ribbon_plot.m
@@ -5,12 +5,6 @@ function ribbon_plot(x,Y, intervals)
 
 col = [0 0 1];
 
-% function to convert a credible interval to percentiles
-% eg
-% interval2percentiles(50) = [25 75]
-% interval2percentiles(95) = [2.5 97.5]
-interval2percentiles = @(interval) [0+((100-interval)/2) 100-((100-interval)/2)];
-
 % sort intervals from narrow to wide
 intervals = sort(intervals, 'descend');
diff --git a/ddToolbox/utils/interval2percentiles.m b/ddToolbox/utils/interval2percentiles.m
new file mode 100644
index 00000000..f1c3115b
--- /dev/null
+++ b/ddToolbox/utils/interval2percentiles.m
@@ -0,0 +1,14 @@
+function percentiles = interval2percentiles(interval)
+% function to convert a credible interval to percentiles
+% eg
+% interval2percentiles(50) = [25 75]
+% interval2percentiles(95) = [2.5 97.5]
+
+assert(isscalar(interval), 'expecting scalar input')
+assert(interval>=0, 'interval must be between 0-100')
+assert(interval<=100, 'interval must be between 0-100')
+
+percentiles = [0+((100-interval)/2)...
+ 100-((100-interval)/2)];
+
+end
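The new interval2percentiles helper pairs naturally with prctile when turning MCMC samples into ribbon bounds, along the lines of the sketch below (prctile comes from the Statistics and Machine Learning Toolbox; the samples here are stand-in random numbers):

% Sketch only: convert a credible-interval width into lower/upper ribbon bounds.
samples = randn(1e4, 1);                              % stand-in for MCMC samples
bounds  = prctile(samples, interval2percentiles(95)); % [2.5th, 97.5th] percentiles
lowerBound = bounds(1);
upperBound = bounds(2);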