// Innovation data with correlated lognormal distributions
techData = [
  {name: "SHR Geothermal", weight: 117, min: 0.43, max: 8.82},
  {name: "SMR", weight: 966, min: 0.29, max: 7.64},
  {name: "EGS", weight: 170, min: 0.22, max: 4.41},
  {name: "Plant-Based Protein", weight: 153, min: 0.18, max: 4.04},
  {name: "Cultivated Protein", weight: 117, min: 0.00001, max: 0.07}, // Avoid zero for lognormal
  {name: "Nat Gas + CCS", weight: 313, min: 0.21, max: 5.21},
  {name: "Solar", weight: 497, min: 0.15, max: 4.19},
  {name: "Green Cement", weight: 136, min: 0.13, max: 2.42}
]
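// A minimal sketch of one way to turn these ranges into distributions
// (assumption: min/max are read as 5th/95th percentiles and combined as a
// weighted mixture; this is not from the original model):
techDists = List.map(techData, {|t| lognormal({p5: t.min, p95: t.max})})
techWeights = List.map(techData, {|t| t.weight})
combinedTech = mixture(techDists, techWeights)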
// Cost-effectiveness of messaging campaigns directed at elected representatives.
// I created the core model and then ran it through Squiggle AI to create the boilerplate. Then I wrote all the docstrings.

// ==== Inputs ====
inputs = {
  @name("Proportion of Votes Within 1% (%)")
  @doc(
    "Proportion of vote outcomes that fall within one percentage point of the pass threshold. Based on historical voting records. The number has varied over time—it was 2.2% from 2000 to 2010, and 5.1% from 2015 to 2025."
  )
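  // Hypothetical completion of this input: the range below is an illustrative
  // assumption built from the two historical figures quoted in the docstring,
  // not a value from the original model.
  proportionVotesWithin1Pct = 2.2 to 5.1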
voll = lognormal({p5: 10.6, p95: 18.7}) // $/kWh, via ERCOT/Brattle — their 95% CI fitted to a 90% CI due to Squiggle idiosyncrasies
threshold = 1*10^11 // $ of economic loss
lost_load_kWh = threshold / voll
lost_load_TWh = lost_load_kWh / 10^9 // 1 TWh = 10^9 kWh
total_us_consumption_TWh = 4070
duration_multiplier = lognormal({p25: 2.8, p75: 3.2}) // via
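// A small worked step, assuming the implied lost load is compared directly
// against annual US consumption (this combination is an assumption, not from
// the original model):
lost_load_share_of_annual = lost_load_TWh / total_us_consumption_TWh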
crises_per_decade = 1 to 7
chance_sentinel_identifies_a_crisis_a_week_to_two_months_beforehand = beta(6, 10)

// Large scale catastrophe
chance_crisis_is_catastrophic_but_not_existential = beta(3, 100)
badness_of_catastrophic_risk_vs_existential = 0.1

// Existential catastrophe
chance_crisis_is_existential = beta(1, 100)
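// A hedged sketch of how these inputs might combine; the linear weighting
// below is an assumption, not from the original model:
expected_badness_per_crisis = chance_crisis_is_catastrophic_but_not_existential * badness_of_catastrophic_risk_vs_existential + chance_crisis_is_existential
expected_flagged_badness_per_decade = crises_per_decade * chance_sentinel_identifies_a_crisis_a_week_to_two_months_beforehand * expected_badness_per_crisis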
questions = 10
forecaster_time_for_initial_forecast = 4 to 20 // hours per question
forecaster_hourly_rate = 100 to 200 // $/hour
initial_forecast_cost = questions * forecaster_time_for_initial_forecast * forecaster_hourly_rate
forecast_update = questions * (0.3 to 1) // hours of updating per week
update_cost = forecast_update * forecaster_hourly_rate
annual_cost_to_deliver = initial_forecast_cost + update_cost * 52 // weekly updates for a year
// How many AI digital workers?
// =====================================================
// Key question: on tasks that AI can currently solve, how many digital workers could OpenAI deploy given their current inference compute stocks?

// Method:
// 1. We start with estimates we've made of OpenAI's compute stocks (currently around 1M H100-equivalents, split roughly equally between Hoppers and GB200s)
// 2. From this, we account for utilization + only some fraction of compute going to inference, and get a daily amount of inference FLOP
// 3. We then make assumptions about GPT-5 active params, and based on this estimate (a) the number of GPT-5 inference tokens OpenAI could generate in a day. We ensemble this with another estimate of (a), based on OpenAI's announcement that they process 3 billion messages a day.
// 4. We then estimate (b) the number of "tokens" spent by a human employee in a day, based on estimates of human thinking speed, as well as some token usage data from METR
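// A rough sketch of steps 1–4 with illustrative numbers. Every constant and
// range below is an assumption for illustration, not an estimate from the
// original model:
h100_equivalents = 1M // step 1
effective_flop_per_gpu_s = 1e15 // assumption: ~1e15 FLOP/s per H100-equivalent at inference precision
utilization = 0.3 to 0.5 // assumption
inference_share = 0.4 to 0.7 // assumption: fraction of compute serving inference
daily_inference_flop = h100_equivalents * effective_flop_per_gpu_s * utilization * inference_share * 86400 // step 2
gpt5_active_params = 100B to 1T // assumption for step 3
flop_per_token = 2 * gpt5_active_params // ~2 FLOP per active parameter per generated token
daily_tokens = daily_inference_flop / flop_per_token
human_tokens_per_day = 30k to 300k // assumption for step 4
digital_workers = daily_tokens / human_tokens_per_day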
/* Generated by Squiggle AI. Workflow ID: 168d0158-1934-49d1-9972-32851ae39b32 */
import "hub:ozziegooen/sTest" as sTest

// Democracy Preservation vs. AI Safety Funding Analysis
// This model examines the relative cost-effectiveness of funding
// democratic election interventions vs. AI safety interventions

// == Core Parameters ==
/*
Cost to morally offset the harm of an LLM subscription by donating to AI safety organizations.

Variables and their values were written by me; scaffolding and docs were initially written by AI and then heavily edited by me.

Note: Any reference to "AI companies" refers specifically to frontier AI companies that are working toward AGI/ASI.
*/

@name("Revenue to Valuation Ratio")
/* Generated by Squiggle AI. Workflow ID: 583117f3-e55f-4334-86f4-4d094f193e4e */
import "hub:ozziegooen/sTest" as sTest

// Youth Programs Cost-Effectiveness Model
// Comparing Leaf (large, low-cost with select Fellows) vs NT (selective with scholarships)

@name("Model Inputs")
inputs = {
p = 0.3% to 3% // percentage
pop = 2M to 16M // species population
a = 1/(0.2 to 0.3125) // index -- typically 0.25 is used, i.e. f^0.25 of species live in fraction f of the habitat
total_area = 10M to 15M // global rainforest area in sqkm
fraction = (1-(1-p)^a)/(p*pop) // fraction of forest that must be destroyed to drive one species extinct
area_per_extinction = total_area * fraction // in sqkm
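// A small derived step, not in the original: inverting gives the marginal
// extinction rate the model implies.
extinctions_per_sqkm = 1 / area_per_extinction // expected extinctions per sqkm destroyed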
/*
Relevant variables here:
  - p misaligned
  - p control works
  - timeline to AGI
*/

agi_arrival_year = 28 to 39 // i.e. 2028 to 2039
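// Hypothetical placeholders for the other variables listed above; the ranges
// are illustrative assumptions, not values from the original model:
p_misaligned = 0.1 to 0.6
p_control_works = 0.2 to 0.8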
// Estimate person-hours to first-draft this supplement, combining inside/outside views
// Results are distributions; key outputs appear in the notebook at the end.

// ===== Inputs =====
@name("Pages per item by section")
@doc("Fixed page targets per question in each section")
pagesPerItem = {
  sec1to3 = 1.5
  sec4 = 0.8
// Sensitivity Analysis Examples in Squiggle

// ===== Basic Model: Investment Returns =====
@name("Base Case Inputs")
baseInputs = {
  @name("Initial Investment ($)")
  @format("$,.0f")
  initialInvestment = 100k
// TESTING: is multiplying EVs a good approximation of the EV of a multiplication?
a = 1 to 1000
b = 1 to 1000
c = 1 to 1000
d = 1 to 1000
e = 1 to 1000

// EV of mult
mult = a*b*c*d*e
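// One way to run the check (for independent variables the two quantities
// should agree up to sampling error, since E[XY] = E[X]E[Y] under independence):
ev_of_mult = mean(mult)
mult_of_evs = mean(a) * mean(b) * mean(c) * mean(d) * mean(e)
ratio = ev_of_mult / mult_of_evs // ≈ 1 if the approximation holds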
// Assumptions
total_campaign_benefit = beta({mean: 0.07, stdev: 0.04}) / 2 // div by 2 because we are only increasing half the voters' turnout
labor_share = beta({mean: 0.01, stdev: 0.01})
labor_benefit = total_campaign_benefit * labor_share
default_margin = normal(-0.07, 0.07) // Dan Osborn's margin was -7%

// Calculations
n = 10000
labor_samples = sampleN(labor_benefit, n)
margin_samples = sampleN(default_margin, n)
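// A hedged continuation, assuming the labor benefit adds directly to the
// margin (this step is an illustration, not from the original model):
flipped = List.zip(margin_samples, labor_samples) -> List.filter({|pair| pair[0] < 0 && pair[0] + pair[1] > 0})
p_flip = List.length(flipped) / n // share of samples where the benefit flips a loss to a win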
/* Generated by Squiggle AI. Workflow ID: ec3fdb2e-104c-4847-bbfa-ad46559d669f */
import "hub:ozziegooen/sTest" as sTest

// Cost-Benefit Analysis of Prostate Orgasm Exploration
// This model evaluates the expected costs and benefits of pursuing prostate orgasms

// == Input Parameters ==
/*
For an AI coup to happen by 2035:
  * Develop very powerful AI systems
  * AI systems are catastrophically misaligned
  * AI systems are deployed in a coup-enabling context
  * AI systems successfully pull off a coup
*/
p_dev_powerful_ai_2035 = 0.8
p_cat_misalignment = 0.5
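// Hypothetical completion of the chain above; the two probabilities below are
// illustrative assumptions, not values from the original model:
p_coup_enabling_deployment = 0.3 // assumption
p_coup_succeeds = 0.2 // assumption
p_ai_coup_by_2035 = p_dev_powerful_ai_2035 * p_cat_misalignment * p_coup_enabling_deployment * p_coup_succeeds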
/*
Generated by Squiggle AI. Workflow ID: ca63c333-eaa6-40be-a4b5-798a2ba5e8d4
David Reinstein -- edited from this starting point.
The original model started with https://acbmcostcalculator.ucdavis.edu/
*/

/// Initial code generated by GPT5 based on conversation at https://chatgpt.com/share/68b9b37b-810c-8002-9300-1f6c6a8da252 on 4 Sep 2025.
/// Cultured Meat Cost (CM-COGS) — Simple Scenario Model (v0.1)
/// Audience: economists & non-engineers
/// Epistemic status: starter scaffold with reasonable ranges; replace with your own data.
/// Conventions: all costs in USD; “kg” means edible wet weight. [?of pure CM cells?]
/// cultured meat cost model (simple starter)
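// A minimal hypothetical sketch of the COGS structure the header describes;
// every range below is a placeholder assumption, not data from the original model:
media_cost_per_kg = 20 to 200 // USD/kg: growth media, often cited as the dominant cost driver
other_opex_per_kg = 5 to 50 // USD/kg: labor, energy, consumables, facility overhead
capex_per_kg = 3 to 30 // USD/kg: amortized bioreactor and plant capital
cm_cogs_per_kg = media_cost_per_kg + other_opex_per_kg + capex_per_kg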