/* SIMPLE PPE COST-EFFECTIVENESS MODEL

I thought that the old cost-effectiveness model relied too much on a tower of fake assumptions, and I wanted something simpler and easier to intuit.

This new model assumes that there's a certain amount of x-risk that's addressable with PPE alone. This risk exists because we either 1) don't have enough effective PPE in stock to protect everyone quickly, 2) can't produce enough effective PPE to protect everyone quickly, or 3) both.

The amount of this PPE-addressable x-risk that's *actually* addressed is a function of two things: how much we increase PPE stocks, and how much we increase PPE production capacity. To capture this, I use an OR function: 100% of the PPE-addressable risk has been reduced when:
- We have 8B units of PPE, OR
- We're able to produce 8B units of PPE per day. */
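// A minimal sketch (mine, not from the original model) of the OR function described
// above, assuming linear progress toward each 8B target. Taking the max of the two
// progress fractions implements the OR: either route alone can address all of the risk.
cap01(x) = if x > 1 then 1 else x
fractionAddressed(stockUnits, productionPerDay) = {
  stockFraction = cap01(stockUnits / 8B)
  capacityFraction = cap01(productionPerDay / 8B)
  if stockFraction > capacityFraction then stockFraction else capacityFraction
}
// e.g. fractionAddressed(2B, 0.5B) -> 0.25: a quarter of the addressable risk is handled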
/* A cost-effectiveness analysis for starting a communist revolution in the United States.

Critics of EA like to say that it's impossible to create a CEA for a communist revolution. I am making this to prove that it's possible. This wasn't even hard; it took me like half an hour. I'm sure I could do a much better job if I invested more time. */

@doc(
  "Conservative estimate. Surely communism is at least 20% better than capitalism, but it's likely much better than that."
)
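// Hypothetical decorated value (the original truncates after the @doc above; this
// name and range are my assumptions, not the author's): how much better things are
// under communism, as a multiplier relative to capitalism.
communismQualityMultiplier = 1.2 to 3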
// Project the stock prices of the top 5 AI companies over the next 10 years, with a calculator for visualization
import "hub:ozziegooen/sTest" as sTest

@name("Top AI Companies")
companies = [
  {
    name: "GOOGL",
    description: "Alphabet Inc.",
    base: 2.02T // current market cap, USD
    // ... (the remaining companies are truncated in the original)
  }
]
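// Illustrative sketch (mine; the projection code itself is truncated in the original):
// project a base market cap forward n years with an assumed annual growth multiplier.
annualGrowth = 0.9 to 1.4 // assumed multiplicative yearly growth, 90% CI
projectValue(base, years) = base * annualGrowth ^ years
// e.g. projectValue(2.02T, 10) gives a distribution over the 10-year market cap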
export autoTable(data) = {
  // Union of the keys that appear in any row, in first-seen order
  keys = List.flatten(data) -> map(Dict.keys) -> List.flatten -> List.uniq
  Table.make(
    data,
    { columns: keys -> map({|key| { name: key, fn: {|f| f[key]} }}) }
  )
}

@showAs(autoTable)
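// Hypothetical example rows (mine, not from the original model) for the truncated
// @showAs(autoTable) decorator above: renders a table with one column per unique key.
exampleRows = [
  { name: "a", value: 1 },
  { name: "b", value: 2, extra: 3 }
]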
a = 0.5 // EU
x = lognormal({mean: 0.18 + a, stdev: 0.34}) - a // EU correlation
u0 = cdf(x, 0) // Probability that EU correlation is negative
u0pt5 = cdf(x, 0.5) - cdf(x, 0) // Probability that EU correlation is between 0 and 0.5
u1 = cdf(x, 1) - cdf(x, 0.5) // Probability that EU correlation is between 0.5 and 1
u1plus = 1 - cdf(x, 1) // Probability that EU correlation is greater than 1
b = 0.5 // China
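// Sanity check (my addition, not in the original): the four buckets partition the
// distribution, so their probabilities should sum to 1.
totalProbability = u0 + u0pt5 + u1 + u1plus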
/* Describe your code here */
a = normal(2, 5)
// Link to post:
/* */

// 🟨🟨 Setting up the functions (in general form):
u_eta(w, eta) = (w^(1 - eta) - 1) / (1 - eta) // isoelastic utility of wealth w
f_eta(w, eta) = 1 - w^(1 - eta) // the fraction of the problem that has been solved given a certain level of investment
v_eta_i(w, eta, i) = i * (1 - w^(1 - eta)) // value = importance i times the fraction solved
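// Illustrative usage (values are mine, not from the original): with eta = 2, growing
// resources to w = 4x the baseline solves f_eta(4, 2) = 1 - 1/4 = 75% of the problem.
exampleFraction = f_eta(4, 2) // 0.75
exampleValue = v_eta_i(4, 2, 100) // 75, for a problem with importance 100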
/* Model for RFI question
https://www.randforecastinginitiative.org/questions/1463-in-the-next-12-months-will-a-large-language-model-built-by-a-chinese-organization-rank-in-the-top-3-overall-on-the-chatbot-arena-llm-leaderboard */

/* Using Metaculus estimates for the GPT-5 release; I think they're reasonable.
https://www.metaculus.com/questions/15462/when-will-gpt5-be-announced/ */
dateLastUpdate = Date.make(2024, 11, 30)
metaculusYearsNextGen_LB = toYears(Date.make(2025, 1, 20) - dateLastUpdate) // lower bound
metaculusYearsNextGen_UB = toYears(Date.make(2025, 7, 12) - dateLastUpdate) // upper bound
yearsUntilNextGenModels = metaculusYearsNextGen_LB to metaculusYearsNextGen_UB
// Unit constants
k = 10^3
m = 10^6
b = 10^9
t = 10^12

n_phd = 20*k to 200*k
n_stem = 5*m to 50*m
n_ee = 50*m to 500*m
b_phd = truncate(0.03 to 0.30, 0.01, 1)
// How much time would it take to migrate Squiggle Hub from GraphQL (slow) to RSC (10x faster)?
// Searching in VS Code with the ` query [A-Z]` regex; not counting tests.
queries = 24
mutations = 25
minPerQuery() = mx(
  // Some queries are made of nested fragments; this estimate is based on converting the
  // frontpage and user-page definitions and groups, and I should get faster with each query.
  7 to 15
  // Are there any queries I might have to load in portions, dataloader-style?
  // I hope not, but I'm not sure.
  // ... (the remaining branches are truncated in the original)
)
guests = 70 to 115
guests_served_per_full_size_pan = 15 to 40
types_of_heated_food = 2 to 4
number_of_pans = guests / guests_served_per_full_size_pan * types_of_heated_food

pan_width = 10 to 13 // inches
pan_length = 18 to 22 // inches
pan_water_depth = 0.75 to 2 // inches
cubic_inches_of_water_per_pan = pan_width * pan_length * pan_water_depth
total_cubic_inches_of_water = number_of_pans * cubic_inches_of_water_per_pan
gallons_of_water = total_cubic_inches_of_water / 231 // 231 cubic inches per US gallon
/* Model for RFI question
https://www.randforecastinginitiative.org/questions/1475-how-many-german-language-disinformation-cases-originating-in-pro-kremlin-media-will-there-be-between-1-october-2024-and-30-september-2025-compared-to-the-same-period-one-year-prior */

/* Define question info */
dateStartPeriod = Date.make(2024, 10, 1)
dateEndPeriod = Date.make(2025, 9, 30)
previousYearsCounts = [67, 14, 82, 78]

/* Input latest data */
import "hub:ozziegooen/sTest" as sTest @doc( "A basketball win probability model that estimates the probability of the home team winning and margin of victory based on current game state and pre-game estimates" ) winProbModel = { @name("Game State and Setup") inputs = { @name("Current Margin") @doc( "Current score difference (home - away). Positive means home team ahead"
m = 10^6

ukraine_duration = 3 to 6
ukraine_scope = 0.225 * m
us_damages = 7000 * m to 14000 * m
us_duration = 4 * 24
us_scope = 55 * m

scope_factor = us_scope / ukraine_scope
// == Market Inputs ==
@name("Market Factors")
market = {
  @name("Square Footage")
  squareFootage = 4500 to 6000

  @name("Cost per SqFt to Purchase")
  @format("$,.2f")
  costPerSqFt = 200 to 300
// == Market Inputs ==
@name("Market Factors")
market = {
  @name("Square Footage")
  squareFootage = 4500 to 6000

  @name("Cost per SqFt to Purchase")
  @format("$,.2f")
  costPerSqFt = 220 to 300
// == Market Inputs ==
@name("Market Factors")
market = {
  @name("Square Footage")
  squareFootage = 8000 to 12000

  @name("Cost per SqFt to Purchase")
  @format("$,.2f")
  costPerSqFt = 300 to 600
/* Monthly revenue of a banya */
cost_per_sq_foot = 18 to 35 // $/sqft, assumed to be the standard annual quote
sq_footage = 8000 to 20000
rent = sq_footage * cost_per_sq_foot / 12 // monthly rent from the annual $/sqft rate

/* Electricity costs */