id | metadata | text
---|---|---
proofpile-julia0005-42449 | {
"provenance": "014.jsonl.gz:242450"
} | using Random, Test
using LatexLH, ModelObjectsLH, ModelParams, CollegeEntry
ce = CollegeEntry;
function constructor_test()
@testset "Constructors" begin
J = 8; nc = 3;
switches = make_entry_switches_oneloc(J, nc);
@test validate_es(switches)
end
end
function access_test(switches)
@testset "Access routines" begin
# println("\n--------------------------")
# println(switches);
objId = ObjectId(:entryDecision);
st = ce.make_test_symbol_table();
e = init_entry_decision(objId, switches, st);
# println(e);
J = n_types(e);
nc = n_colleges(e);
nl = n_locations(e);
@test 0.0 < min_entry_prob(e) < max_entry_prob(e) < 1.0
@test entry_pref_scale(e) > 0.0
@test isa(CollegeEntry.value_local(e), Float64)
capacityV = capacities(e);
if any(capacityV .< 1e6)
@test CollegeEntry.limited_capacity(e)
else
@test !CollegeEntry.limited_capacity(e)
end
typeMass_jlM = type_mass_jl(e);
@test size(typeMass_jlM) == (J, nl)
@test all(typeMass_jlM .>= 0.0)
for j = 1 : J
@test isapprox(typeMass_jlM[j,:], type_mass_jl(e, j))
@test isapprox(sum(typeMass_jlM[j,:]), type_mass_j(e, j))
end
@test type_mass_jl(e, J, nl) == typeMass_jlM[J, nl]
capacity_clM = capacities(e);
@test size(capacity_clM) == (nc, nl)
for ic = 1 : nc
@test isapprox(capacity_clM[ic,:], CollegeEntry.capacity(e, ic))
end
if nl == 1
@test CollegeEntry.value_local(e) == 0.0
end
end
end
function subset_switches_test(switches)
@testset "Subset switches" begin
idxV = 2 : 2 : n_types(switches);
CollegeEntry.subset_types!(switches, idxV);
@test validate_es(switches)
@test n_types(switches) == length(idxV)
end
end
# Test `entry_probs` which has no notion of locations
function entry_test(switches, prefShocks :: Bool)
rng = MersenneTwister(49);
@testset "Entry probs: $switches" begin
F1 = Float64;
st = ce.make_test_symbol_table();
e = init_entry_decision(ObjectId(:entry), switches, st);
# println("\n------------")
# println(e);
# println("Preference shocks: $prefShocks");
J = n_types(e);
nc = n_colleges(e);
nl = n_locations(e);
vWork_jV, vCollege_jcM = CollegeEntry.values_for_test(rng, J, nc, nl);
admitV = [1, 3];
rejectV = [2];
prob_jcM, eVal_jV = entry_probs(e, vWork_jV, vCollege_jcM, admitV;
prefShocks = prefShocks);
@test all(prob_jcM .>= 0.0)
@test all(prob_jcM .<= 1.0)
@test size(prob_jcM) == (J, nc)
@test size(eVal_jV) == (J,)
@test all(prob_jcM[:, rejectV] .== 0.0)
# One person at a time
for j = 1 : J
prob_cV, eVal =
entry_probs(e, vWork_jV[j], vCollege_jcM[j,:], admitV;
prefShocks = prefShocks);
@test isapprox(prob_cV, prob_jcM[j,:])
@test isapprox(eVal, eVal_jV[j])
end
# Increasing a value should increase probability
# Not for limited capacities, though
# Also not without pref shocks
if !limited_capacity(e) && prefShocks
idx = admitV[end];
otherAdmitV = admitV[1 : (end-1)];
vCollege_jcM[:, idx] .+= 0.1;
prob2_jcM, eVal2_jV = entry_probs(e, vWork_jV, vCollege_jcM, admitV;
prefShocks = prefShocks);
@test all(prob2_jcM[:, idx] .> prob_jcM[:, idx])
@test all(prob2_jcM[:, otherAdmitV] .< prob_jcM[:, otherAdmitV])
@test all(eVal2_jV .> eVal_jV)
end
# Empty admission set
prob_jcM, eVal_jV = entry_probs(e, vWork_jV, vCollege_jcM, [];
prefShocks = prefShocks);
@test size(prob_jcM) == (J, nc)
@test all(prob_jcM .== 0.0)
# if !isa(e, EntryTwoStep)
@test isapprox(eVal_jV, vWork_jV)
# end
end
end
# Check that small preference shocks give about the same answer as no preference shocks
function small_pref_entry_test(switches)
rng = MersenneTwister(49);
@testset "Entry probs" begin
F1 = Float64;
ce.set_pref_scale!(switches, 0.001);
st = ce.make_test_symbol_table();
e = init_entry_decision(ObjectId(:entry), switches, st);
# println("\n------------")
# println(e);
J = n_types(e);
nc = n_colleges(e);
nl = n_locations(e);
vWork_jV, vCollege_jcM = CollegeEntry.values_for_test(rng, J, nc, nl);
admitV = [1, 3];
rejectV = [2];
prob_jcM, eVal_jV = entry_probs(e, vWork_jV, vCollege_jcM, admitV;
prefShocks = true);
@test all(prob_jcM .>= 0.0)
@test all(prob_jcM .<= 1.0)
@test size(prob_jcM) == (J, nc)
@test size(eVal_jV) == (J,)
@test all(prob_jcM[:, rejectV] .== 0.0)
prob2_jcM, eVal2_jV = entry_probs(e, vWork_jV, vCollege_jcM, admitV;
prefShocks = false);
@test isapprox(prob_jcM, prob2_jcM, atol = 0.01)
@test isapprox(eVal_jV, eVal2_jV, rtol = 0.01)
end
end
function sim_entry_test(switches)
@testset "Simulate Entry probs" begin
rng = MersenneTwister(123);
F1 = Float64;
st = ce.make_test_symbol_table();
e = init_entry_decision(ObjectId(:entry), switches, st);
# println("\n------------")
# println(e);
J = n_types(e);
nc = n_colleges(e);
nl = n_locations(e);
vCollege_clM = test_value_cl(rng, nc, nl);
vWork = 1.0 + sum(vCollege_clM) / length(vCollege_clM);
avail_clM = rand(rng, Bool, nc, nl);
# Make sure one college is available
avail_clM[1,1] = true;
prob_clV, eVal = entry_probs(e, vWork, vec(vCollege_clM), vec(avail_clM));
prob_clM = reshape(prob_clV, nc, nl);
@test !any(prob_clM[.!avail_clM] .> 0.0)
# Tests expected value by simulation
nSim = Int(1e5);
prob2_clM, eVal2 = CollegeEntry.sim_entry_probs(e,
vWork, vCollege_clM, avail_clM,
nSim, rng);
@test !any(prob2_clM[.!avail_clM] .> 0.0)
@test isapprox(eVal, eVal2, atol = 1e-4)
# @show eVal, eVal2;
@test isapprox(prob_clM, prob2_clM, atol = 0.02)
end
end
function scale_entry_probs_test()
rng = MersenneTwister(123);
@testset "Scale entry probs" begin
J = 30; nl = 4; nc = 5;
entryProb_jlcM = make_test_entry_probs(rng, J, nc, nl);
minEntryProb = 0.03;
maxEntryProb = 0.8;
scale_entry_probs!(entryProb_jlcM, minEntryProb, maxEntryProb);
entryProb_jlM = sum(entryProb_jlcM, dims = 3);
@test all(entryProb_jlM .< maxEntryProb + 0.01)
@test all(entryProb_jlcM .> 0.0)
# With probs in range, nothing should change
entryProb_jlcM = fill(minEntryProb + 0.01, J, nl, nc);
entryProb2_jlcM = copy(entryProb_jlcM);
scale_entry_probs!(entryProb_jlcM, minEntryProb, maxEntryProb);
@test isapprox(entryProb_jlcM, entryProb2_jlcM)
end
end
@testset "All" begin
constructor_test()
scale_entry_probs_test();
J = 8; nc = 3;
for switches in test_entry_switches(J, nc)
access_test(switches);
subset_switches_test(switches);
for prefShocks ∈ (true, false)
entry_test(switches, prefShocks);
small_pref_entry_test(switches);
end
sim_entry_test(switches);
end
end
# -------------- |
proofpile-julia0005-42450 | {
"provenance": "014.jsonl.gz:242451"
} |
abstract type SDEIntegratorCache{DT,D,M} <: IntegratorCache{DT,D} end
abstract type PSDEIntegratorCache{DT,D,M} <: IntegratorCache{DT,D} end
|
proofpile-julia0005-42451 | {
"provenance": "014.jsonl.gz:242452"
} | abstract type AbstractThrustCoefficientModel end
"""
ThrustModelConstantCt(ct::Float)
Stores a constant ct value for wake calculations
# Arguments
- `ct::Float`: a constant ct value for computation
"""
struct ThrustModelConstantCt{TF} <: AbstractThrustCoefficientModel
ct::TF
end
"""
ThrustModelCtPoints(vel_points, ct_points)
Stores the thrust coefficient curve in terms of corresponding velocity and thrust
coefficient points. ct and velocity points should be in the same order and ordered from
lowest wind speed to highest wind speed.
# Arguments
- `vel_points::Array{Float}`: wind speeds at which the thrust coefficient is specified
- `ct_points::Array{Float}`: thrust coefficient values corresponding to `vel_points`
"""
struct ThrustModelCtPoints{ATF} <: AbstractThrustCoefficientModel
vel_points::ATF
ct_points::ATF
end
"""
calculate_ct(inflow_velocity, thrust_model::ThrustModelConstantCt)
Calculate the thrust coefficient for a wind turbine based on a pre-determined constant ct
# Arguments
- `inflow_velocity::Float`: inflow velocity of the wind turbine (unused for const. ct)
- `thrust_model::ThrustModelConstantCt`: struct containing a constant ct value for computation
"""
function calculate_ct(inflow_velocity, thrust_model::ThrustModelConstantCt)
return thrust_model.ct
end
"""
calculate_ct(inflow_velocity, thrust_model::ThrustModelCtPoints)
Calculate the thrust coefficient for a wind turbine based on a pre-determined ct curve
with linear interpolation.
# Arguments
- `inflow_velocity::Float`: inflow velocity of the wind turbine
- `thrust_model::ThrustModelCtPoints`: Struct containing ct and velocity points for ct curve
"""
function calculate_ct(inflow_velocity, thrust_model::ThrustModelCtPoints)
minv = thrust_model.vel_points[1]
maxv = thrust_model.vel_points[end]
minct = thrust_model.ct_points[1]
maxct = thrust_model.ct_points[end]
if inflow_velocity < minv
ct = minct
elseif inflow_velocity < maxv
ct = linear(thrust_model.vel_points, thrust_model.ct_points, inflow_velocity)
else
ct = maxct
end
if ct > 1
ct = 1.0
end
return ct
end
"""
_ct_to_axial_ind_func(ct)
Calculate axial induction from the thrust coefficient
# Arguments
- `ct::Float`: thrust coefficient
"""
function _ct_to_axial_ind_func(ct)
# initialize axial induction to zero
axial_induction = 0.0
# calculate axial induction
if ct > 0.96 # Glauert condition
axial_induction = 0.143 + sqrt(0.0203 - 0.6427*(0.889 - ct))
else
axial_induction = 0.5*(1.0 - sqrt(1.0 - ct))
end
return axial_induction
end
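# Usage sketch (hypothetical values; assumes the definitions above and a
# `linear` interpolator, e.g. FLOWMath.linear, in scope):
#
#   ct_const = ThrustModelConstantCt(0.8)
#   calculate_ct(8.0, ct_const)    # -> 0.8, independent of inflow velocity
#
#   ct_curve = ThrustModelCtPoints([4.0, 12.0, 25.0], [0.8, 0.7, 0.3])
#   calculate_ct(2.0, ct_curve)    # below the lowest point -> clamped to 0.8
#   calculate_ct(8.0, ct_curve)    # linearly interpolated between 0.8 and 0.7
#   _ct_to_axial_ind_func(0.8)     # 0.5*(1 - sqrt(1 - 0.8)) ≈ 0.276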
|
proofpile-julia0005-42452 | {
"provenance": "014.jsonl.gz:242453"
} | using CRCBS
using GraphUtils
using TOML
using Test, Logging
using Parameters
"""
FatPathsSolver{S,M}
A simple wrapper around a MAPF solver so that `profile_solver!` dispatches
correctly (by adding the fat-paths cost to the problem before solving it).
"""
@with_kw struct FatPathsSolver{S,M} <: SolverWrapper
solver::S = CBSSolver(AStar{NTuple{2,Float64}}())
fat_path_cost_model::M = FlatFPCost()
end
function CRCBS.profile_solver!(solver::FatPathsSolver,mapf)
fp_mapf = CRCBS.init_fat_path_mapf(mapf,solver.fat_path_cost_model)
CRCBS.profile_solver!(solver.solver,fp_mapf)
end
# CRCBS.get_logger(solver::FatPathsSolver) = get_logger(solver.solver)
# CRCBS.low_level(solver::FatPathsSolver) = low_level(solver.solver)
# Problem Instances
base_scen_path = joinpath(ENV["HOME"],"Repos/mapf_benchmarks/scenarios")
map_path = joinpath(ENV["HOME"],"Repos/mapf_benchmarks/maps/")
results_path = "/scratch/mapf_experiments"
EXPERIMENTS_DIR = "/scratch/mapf_experiments/fat_path_experiments"
PROBLEM_DIR = joinpath(EXPERIMENTS_DIR,"problems")
RESULTS_DIR = joinpath(EXPERIMENTS_DIR,"results")
config = (
solver_configs = [
(
solver=CBSSolver(),
results_path=joinpath(RESULTS_DIR,"CBSSolver")
),
(
solver=FatPathsSolver(),
results_path=joinpath(RESULTS_DIR,"FatPathsSolver")
),
(
solver=FatPathsSolver(fat_path_cost_model=NormalizedFPCost()),
results_path=joinpath(RESULTS_DIR,"NormalizedFatPathsSolver")
),
],
problem_dir = PROBLEM_DIR,
feats = [
RunTime(),IterationCount(),SolutionCost(),NumConflicts(),RobotPaths(),
RobotSeparation(),
TimeOutStatus(),IterationMaxOutStatus(),
# MemAllocs(),ByteCount()
]
)
for solver_config in config.solver_configs
set_runtime_limit!(solver_config.solver,50)
set_verbosity!(solver_config.solver,1)
set_iteration_limit!(solver_config.solver,10000)
set_iteration_limit!(low_level(solver_config.solver),1000)
end
# scen_paths = get_files_matching(base_scen_path,".scen",["Berlin_1_256","Paris_1_256"])
scen_paths = get_files_matching(base_scen_path,".scen",[
"empty-8-8-even",
# "empty-16-16-even",
# "empty-32-32-even",
# "empty-48-48-even",
])
BenchmarkInterface.generate_problem_files_from_moving_ai(
scen_paths,
# [joinpath(base_scen_path,"scen-even","empty-8-8-even-10.scen")],
map_path,
PROBLEM_DIR
)
loader = BenchmarkInterface.init_mapf_loader(PROBLEM_DIR)
BenchmarkInterface.profile_with_skipping!(config,loader)
# run_profiling(config,loader)
|
proofpile-julia0005-42453 | {
"provenance": "014.jsonl.gz:242454"
} | using Test
using TestSetExtensions
using LinearAlgebra
using Qaintessent
using SparseArrays
using StatsBase
##==----------------------------------------------------------------------------------------------------------------------
isunitary(cg::CircuitGate) = (sparse_matrix(cg) * sparse_matrix(Base.adjoint(cg)) ≈ I)
@testset ExtendedTestSet "circuit gates" begin
θ = 0.7 * π
ϕ = 0.4 * π
n = randn(3); n /= norm(n)
# single qubit gates
@testset "single qubit circuit gates" begin
for g in [X, Y, Z, HadamardGate(), SGate(), TGate(), RxGate(θ), RyGate(θ), RzGate(θ), RotationGate(θ, n), PhaseShiftGate(ϕ)]
cg = CircuitGate((2,), g)
cgadj = adjoint(cg)
@test Qaintessent.sparse_matrix(cgadj.gate) == adjoint(Qaintessent.sparse_matrix(cg.gate))
@test LinearAlgebra.ishermitian(cg) == (Qaintessent.sparse_matrix(cg) == Qaintessent.sparse_matrix(adjoint(cg)))
end
cgs = circuit_gate.((2,), [X, Y, Z, HadamardGate(), SGate(), TGate(), RxGate(θ), RyGate(θ), RzGate(θ), RotationGate(θ, n), PhaseShiftGate(ϕ)])
@test all(sparse_matrix(adjoint(cgs)) .≈ adjoint(sparse_matrix(cgs)))
end
# two qubit gates
@testset "two qubit circuit gates" begin
for g in [EntanglementXXGate(θ), EntanglementYYGate(θ), EntanglementZZGate(θ), controlled_not(), SwapGate()]
cg = CircuitGate((2, 3), g)
cgadj = adjoint(cg)
@test Qaintessent.sparse_matrix(cgadj.gate) == adjoint(Qaintessent.sparse_matrix(cg.gate))
@test LinearAlgebra.ishermitian(cg) == (Qaintessent.sparse_matrix(cg) == Qaintessent.sparse_matrix(adjoint(cg)))
end
end
# Y acting on second wire
@testset "apply circuit gate to second wire" begin
cg = CircuitGate((2,), Y)
@test Qaintessent.sparse_matrix(cg, 3) ≈ kron(Matrix(I, 2, 2), Qaintessent.matrix(Y), Matrix(I, 2, 2))
@test isunitary(cg)
end
# flip control and target
@testset "flip control and target circuit gate" begin
cg = CircuitGate((2, 1), controlled_not())
@test Qaintessent.sparse_matrix(cg) ≈ [1 0 0 0; 0 0 0 1; 0 0 1 0; 0 1 0 0]
@test isunitary(cg)
end
# third qubit as control and first qubit as target
@testset "shift control and target circuit gate" begin
cg = circuit_gate(1, HadamardGate(), 3)
@test Qaintessent.sparse_matrix(cg) ≈ [
Matrix(I, 4, 4) fill(0, 4, 2) fill(0, 4, 2);
fill(0, 2, 4) Qaintessent.matrix(HadamardGate()) fill(0, 2, 2);
fill(0, 2, 6) Qaintessent.matrix(HadamardGate())]
@test isunitary(cg)
end
@testset "circuit gate exceptions" begin
H = HadamardGate()
S = SwapGate()
N = 2
@test_throws ErrorException("SwapGate affects 2 wires but 0 wires, (), were passed.") CircuitGate{0,SwapGate}(NTuple{0,Int}(), S)
@test_throws ErrorException("Wire indices must be unique.") CircuitGate{2,SwapGate}((1, 1), S)
@test_throws ErrorException("Wire index cannot be smaller than 1.") CircuitGate{2,SwapGate}((1, -1), S)
end
end
##==----------------------------------------------------------------------------------------------------------------------
@testset ExtendedTestSet "circuit gate isapprox" begin
θ = 0.7 * π
ϕ = 0.4 * π
n = randn(3); n /= norm(n)
ϵ = 3*sqrt(eps())
sqg = [RxGate(θ), RyGate(θ), RzGate(θ), RotationGate(θ, n), PhaseShiftGate(ϕ)]
sqḡ = [RxGate(θ + eps()), RyGate(θ + eps()), RzGate(θ + eps()), RotationGate(θ + eps(), n), PhaseShiftGate(ϕ + eps())]
sqĝ = [RxGate(θ + ϵ), RyGate(θ + ϵ), RzGate(θ + ϵ), RotationGate(θ + ϵ, n), PhaseShiftGate(ϕ + ϵ)]
for (i, g) in enumerate(sqg)
cg1 = CircuitGate((2,), sqg[i])
cg2 = CircuitGate((2,), sqḡ[i])
cg3 = CircuitGate((2,), sqĝ[i])
@test cg1 ≈ cg2
@test !(cg1 ≈ cg3)
end
end
##==----------------------------------------------------------------------------------------------------------------------
@testset ExtendedTestSet "circuit gate helper functions" begin
N = 5
@testset ExtendedTestSet "circuit gate single qubit helper function" begin
iwire = rand(1:N)
g = XGate()
@test circuit_gate(iwire, g) ≈ CircuitGate((iwire,), g)
end
@testset "circuit gate two qubit helper function" begin
iwire1 = rand(1:N)
iwire2 = rand(vcat(1:iwire1 - 1..., iwire1 + 1:N...))
g2 = SwapGate()
@test circuit_gate(iwire1, iwire2, g2) ≈ CircuitGate((iwire1, iwire2), g2)
end
@testset "circuit gate controlled gate helper function" begin
cntrl_iwire1, cntrl_iwire2, targ_iwire1, targ_iwire2 = sample(1:N, 4, replace=false)
g = YGate()
ref_cg = CircuitGate((targ_iwire1, cntrl_iwire1), ControlledGate(g, 1))
@test circuit_gate(targ_iwire1, g, cntrl_iwire1) ≈ ref_cg
@test circuit_gate(targ_iwire1, g, (cntrl_iwire1,)) ≈ ref_cg
@test circuit_gate((targ_iwire1,), g, cntrl_iwire1) ≈ ref_cg
@test circuit_gate((targ_iwire1,), g, (cntrl_iwire1,)) ≈ ref_cg
g = SwapGate()
ref_cg2 = CircuitGate((targ_iwire1, targ_iwire2, cntrl_iwire1), ControlledGate(g, 1))
@test circuit_gate(targ_iwire1, targ_iwire2, g, cntrl_iwire1) ≈ ref_cg2
@test circuit_gate(targ_iwire1, targ_iwire2, g, (cntrl_iwire1,)) ≈ ref_cg2
@test circuit_gate((targ_iwire1, targ_iwire2), g, cntrl_iwire1) ≈ ref_cg2
@test circuit_gate((targ_iwire1, targ_iwire2), g, (cntrl_iwire1,)) ≈ ref_cg2
ref_cg3 = CircuitGate((targ_iwire1, targ_iwire2, cntrl_iwire1, cntrl_iwire2), ControlledGate(g, 2))
@test circuit_gate(targ_iwire1, targ_iwire2, g, cntrl_iwire1, cntrl_iwire2) ≈ ref_cg3
@test circuit_gate(targ_iwire1, targ_iwire2, g, (cntrl_iwire1, cntrl_iwire2)) ≈ ref_cg3
@test circuit_gate((targ_iwire1, targ_iwire2), g, cntrl_iwire1, cntrl_iwire2) ≈ ref_cg3
@test circuit_gate((targ_iwire1, targ_iwire2), g, (cntrl_iwire1, cntrl_iwire2)) ≈ ref_cg3
end
# test sparse_matrix
@testset "circuit gates sparse matrix" begin
cgs = [circuit_gate(1, X), circuit_gate(2, X), circuit_gate(3, X)]
m = sparse_matrix(cgs)
@test m ≈ sparse([8, 7, 6, 5, 4, 3, 2, 1], [1, 2, 3, 4, 5, 6, 7, 8], Float64[1, 1, 1, 1, 1, 1, 1, 1])
end
# test sparse_matrix
@testset "circuit gates sparse matrix exceptions" begin
cgs = [circuit_gate(1, X), circuit_gate(2, X), circuit_gate(3, X)]
@test_throws ErrorException("Circuit size `2` too small; vector of CircuitGate requires 3 wires.") sparse_matrix(cgs, 2)
end
end
|
proofpile-julia0005-42454 | {
"provenance": "014.jsonl.gz:242455"
} | abstract type Estimator end
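# The definitions below make an `Estimator` behave like a one-element iterable,
# so it can be treated as a scalar-like object in broadcasts and iteration-based
# code (e.g. when estimators are broadcast alongside sample-space arrays).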
Base.iterate(estimator::Estimator, state = 0) = state > 0 ? nothing : (estimator, state + 1)
Base.length(estimator::Estimator) = 1
Base.show(io::IO, estimator::Estimator) = print(io, string(estimator))
Base.show(io::IO, ::MIME"application/prs.juno.inline", estimator::Estimator) = print(io, string(estimator))
(::Estimator)(x1::Integer, x2::Integer, design::AbstractDesign) = error("not implemented")
function estimate(estimator::TE, x1::TI, x2::TI, design::TD) where {TE<:Estimator,TI<:Integer,TD<:AbstractDesign}
return estimator(x1, x2, design)
end
Base.string(estimator::Estimator) = error("not implemented")
function bias(p::Real, estimator::Estimator, design::AbstractDesign)
0 <= p <= 1 ? nothing : error("p must be between 0 and 1")
XX = sample_space(design)
x1, x2 = XX[:,1], XX[:,2]
return (estimator.(x1, x2, design) .- p) .*
pmf.(x2, n2.(design, x1), p) .*
pmf.(x1, n1(design), p) |>
sum
end
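# In formula form, with X1 ~ Binomial(n1, p) and X2 | X1 ~ Binomial(n2(X1), p)
# (as implied by the `pmf` calls above):
#
#   bias(p) = E_p[ p̂(X1, X2) ] - p
#           = Σ_{x1} Σ_{x2} ( p̂(x1, x2) - p ) pmf(x2; n2(x1), p) pmf(x1; n1, p)
#
# `mean_squared_error` and `mean_absolute_error` below use the same weighting
# with the squared and absolute deviation, respectively.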
function mean_squared_error(p::Real, estimator::Estimator, design::AbstractDesign)
0 <= p <= 1 ? nothing : error("p must be between 0 and 1")
XX = sample_space(design)
x1, x2 = XX[:,1], XX[:,2]
return (estimator.(x1, x2, design) .- p).^2 .*
pmf.(x2, n2.(design, x1), p) .*
pmf.(x1, n1(design), p) |>
sum
end
function mean_absolute_error(p::Real, estimator::Estimator, design::AbstractDesign)
0 <= p <= 1 ? nothing : error("p must be between 0 and 1")
XX = sample_space(design)
x1, x2 = XX[:,1], XX[:,2]
return abs.(estimator.(x1, x2, design) .- p) .*
pmf.(x2, n2.(design, x1), p) .*
pmf.(x1, n1(design), p) |>
sum
end
|
proofpile-julia0005-42455 | {
"provenance": "014.jsonl.gz:242456"
} | module Mod1DynamicDefinition
using ValidatedNumerics
using ..DynamicDefinition, ..Contractors
using ..DynamicDefinition: derivative
export Mod1Dynamic, preim, nbranches, plottable
"""
Defines a Dynamic on [0,1] as the Mod-1 quotient of a given map.
A newer alternative implementation, relying on piecewise-defined functions, is in `mod1_dynamic`.
"""
struct Mod1Dynamic{FT} <: MarkovDynamic
T::FT
nbranches::Int
orientation::Float64
domain::Interval{Float64}
is_full_branch::Bool
end
Mod1Dynamic(T::FT, nbranches = undef, domain = Interval{Float64}(0,1)) where {FT} = Mod1Dynamic{FT}(T, nbranches, domain)
function Mod1Dynamic{FT}(T, nbranches = undef, domain = Interval{Float64}(0,1)) where {FT}
@assert domain == 0..1 # TODO: this only works for domain == 0..1, for now
range_diff = T(@interval(1.))-T(@interval(0.))
orientation = unique_sign(range_diff)
nbranches = ceil(orientation * range_diff).hi
is_full_branch = isinteger(T(0..0)) & isinteger(T(1..1))
return Mod1Dynamic{FT}(T, nbranches, orientation, domain, is_full_branch)
end
DynamicDefinition.domain(S::Mod1Dynamic{FT}) where {FT} = S.domain
DynamicDefinition.nbranches(S::Mod1Dynamic{FT}) where {FT} =S.nbranches
DynamicDefinition.is_full_branch(S::Mod1Dynamic{FT}) where {FT} = S.is_full_branch
# TODO: serious doubts that this works if T(0) is not an integer...
function DynamicDefinition.preim(D::Mod1Dynamic{FT}, k, y, ϵ) where {FT}
# we need to treat the case with the other orientation, 0 not fixed point...
@assert 1 <= k <= D.nbranches
f(x) = D.T(x)-D.T(0)-(y-D.T(0)+(k-1)*D.orientation)
root(f, D.domain, ϵ)
end
DynamicDefinition.derivative(n, D::Mod1Dynamic{FT}, x) where {FT} = derivative(n, D.T, x)
DynamicDefinition.distorsion(D::Mod1Dynamic{FT}, x) where {FT} = distorsion(D.T, x)
DynamicDefinition.max_distorsion(D::Mod1Dynamic{FT}, tol=1e-3) where {FT} = maximise(x -> distorsion(D.T, x), domain(D), tol=tol)[1]
DynamicDefinition.expansivity(D::Mod1Dynamic{FT}, tol=1e-3) where {FT} = maximise(x -> abs(1/derivative(D, x)), domain(D), tol=tol)[1]
function DynamicDefinition.plottable(D::Mod1Dynamic{FT}, x) where {FT}
@assert 0 <= x <= 1
return mod(D.T(x), 1.)
end
using RecipesBase
@recipe f(::Type{Mod1Dynamic{FT}}, D::Mod1Dynamic{FT}) where {FT} = x -> plottable(D, x)
orientation(D::Mod1Dynamic, k) = D.orientation
end
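# Usage sketch (assumes ValidatedNumerics is loaded): the doubling map T(x) = 2x
# induces the full-branch dynamic x ↦ 2x mod 1 on [0,1].
#
#   D = Mod1Dynamic(x -> 2x)
#   nbranches(D)             # -> 2
#   preim(D, 1, 0.5, 1e-13)  # interval enclosure of 0.25, the branch-1 preimage of 0.5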
|
proofpile-julia0005-42456 | {
"provenance": "014.jsonl.gz:242457"
} | const berstandata = [
Dict(
"N" => 10,
"y" => [0,1,0,0,0,0,0,0,0,1]
)
]
|
proofpile-julia0005-42457 | {
"provenance": "014.jsonl.gz:242458"
} | mutable struct MinimaxObjectiveCore
points::Vector{Float64}
targets::Vector{Float64}
IIF::Matrix{Float64}
MinimaxObjectiveCore() =
new(Vector{Float64}(), Vector{Float64}(), Matrix{Float64}(undef, 0, 0))
end
|
proofpile-julia0005-42458 | {
"provenance": "014.jsonl.gz:242459"
} | #!/usr/bin/env julia
# Reproduce the VTK_BEZIER_TETRA_quartic_solidSphereOctant.vtu file from the VTK
# test suite.
using WriteVTK
using Test
const VTK_BASENAME = "bezier_tetra_quartic_solidSphereOctant"
function main()
cell_type = VTKCellTypes.VTK_BEZIER_TETRAHEDRON
# Copied from VTK generated file.
points_in = [
1, 0, 0, 0, 1, 0,
0, 0, 1, 0, 0, 0,
1, 0.4226497411727905, 0, 0.7886751294136047, 0.7886751294136047, 0,
0.4226497411727905, 1, 0, 0, 1, 0.4226497411727905,
0, 0.7886751294136047, 0.7886751294136047, 0, 0.4226497411727905, 1,
0.4226497411727905, 0, 1, 0.7886751294136047, 0, 0.7886751294136047,
1, 0, 0.4226497411727905, 0.75, 0, 0,
0.5, 0, 0, 0.25, 0, 0,
0, 0.75, 0, 0, 0.5, 0,
0, 0.25, 0, 0, 0, 0.75,
0, 0, 0.5, 0, 0, 0.25,
0.5, 0.25, 0, 0.25, 0.5, 0,
0.25, 0.25, 0, 0, 0.25, 0.5,
0, 0.25, 0.25, 0, 0.5, 0.25,
0.5, 0, 0.25, 0.25, 0, 0.25,
0.25, 0, 0.5, 1, 0.5893534421920776, 0.5893534421920776,
0.5893534421920776, 0.5893534421920776, 1, 0.5893534421920776, 1, 0.5893534421920776,
0.30000001192092896, 0.30000001192092896, 0.30000001192092896,
]
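# 105 scalars = 35 control points stored as flat x,y,z triples; a quartic
# Bézier tetrahedron has 35 points (4 vertices, 3 per edge × 6 edges,
# 3 per face × 4 faces, and 1 interior point).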
points = reshape(points_in, 3, :)
connectivity = 1:size(points, 2)
rational_weights = [
1, 1, 1, 1, 0.8365163037378083, 0.7886751345948131,
0.8365163037378083, 0.8365163037378083, 0.7886751345948131, 0.8365163037378083, 0.8365163037378083, 0.7886751345948131,
0.8365163037378083, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1,
1, 0.6594659464934113, 0.6594659464934113, 0.6594659464934113, 1,
]
cells = [MeshCell(cell_type, connectivity)]
outfiles = vtk_grid(VTK_BASENAME, points, cells) do vtk
vtk["RationalWeights"] = rational_weights
end
println("Saved: ", join(outfiles, " "))
outfiles
end
main()
|
proofpile-julia0005-42459 | {
"provenance": "014.jsonl.gz:242460"
} | using CairoMakie
using Colors
using Makie.GeometryBasics
using JLD2
training_data = [
(-5e-4, 5e-8),
(-3.5e-4, 5e-8),
(-2e-4, 5e-8),
]
train_files = [
"wind_-5e-4_diurnal_5e-8"
"wind_-3.5e-4_diurnal_5e-8"
"wind_-2e-4_diurnal_5e-8"
]
interpolating_data = [
(-4.5e-4, 4e-8),
(-4.5e-4, 2e-8),
(-3e-4, 4e-8),
(-3e-4, 2e-8),
]
interpolating_files = [
"wind_-4.5e-4_diurnal_4e-8"
"wind_-4.5e-4_diurnal_2e-8"
"wind_-3e-4_diurnal_4e-8"
"wind_-3e-4_diurnal_2e-8"
]
extrapolating_data_diurnal = [
(-5.5e-4, 5.5e-8),
(-1.5e-4, 5.5e-8),
]
extrapolating_diurnal_files = [
"wind_-5.5e-4_diurnal_5.5e-8"
"wind_-1.5e-4_diurnal_5.5e-8"
]
extrapolating_data = [
(-1.5e-4, 3.5e-8),
(-5.5e-4, 3.5e-8),
(-5.5e-4, 0),
(-5.5e-4, -3.5e-8),
(-1.5e-4, -3.5e-8),
]
extrapolating_files = [
"wind_-1.5e-4_cooling_3.5e-8"
"wind_-5.5e-4_cooling_3.5e-8"
"wind_-5.5e-4_new"
"wind_-5.5e-4_heating_-3.5e-8"
"wind_-1.5e-4_heating_-3.5e-8"
]
momentum_fluxes_training = [data[1] for data in training_data]
buoyancy_fluxes_training = [data[2] for data in training_data]
momentum_fluxes_interpolating = [data[1] for data in interpolating_data]
buoyancy_fluxes_interpolating = [data[2] for data in interpolating_data]
momentum_fluxes_extrapolating = [data[1] for data in extrapolating_data]
buoyancy_fluxes_extrapolating = [data[2] for data in extrapolating_data]
momentum_fluxes_extrapolating_diurnal = [data[1] for data in extrapolating_data_diurnal]
buoyancy_fluxes_extrapolating_diurnal = [data[2] for data in extrapolating_data_diurnal]
fig = CairoMakie.Figure(resolution=(1000, 650))
ax = fig[1,1] = CairoMakie.Axis(fig, xlabel="Buoyancy Flux / m² s⁻³", ylabel="Momentum Flux / m² s⁻²")
color_palette = distinguishable_colors(4, [RGB(1,1,1), RGB(0,0,0)], dropseed=true)
rectangle = CairoMakie.poly!(ax, Point2f0[(-5e-8, -2e-4), (-5e-8, -5e-4), (5e-8, -5e-4), (5e-8, -2e-4)], color=("paleturquoise3", 0.5))
training_lines = [CairoMakie.lines!(ax, [-buoyancy_fluxes_training[i], buoyancy_fluxes_training[i]],
[momentum_fluxes_training[i], momentum_fluxes_training[i]], color=color_palette[1]) for i in 1:length(training_data)]
interpolating_lines = [CairoMakie.lines!(ax, [-buoyancy_fluxes_interpolating[i], buoyancy_fluxes_interpolating[i]],
[momentum_fluxes_interpolating[i], momentum_fluxes_interpolating[i]], color=color_palette[2]) for i in 1:length(interpolating_data)]
extrapolating_lines = [CairoMakie.lines!(ax, [-buoyancy_fluxes_extrapolating_diurnal[i], buoyancy_fluxes_extrapolating_diurnal[i]],
[momentum_fluxes_extrapolating_diurnal[i], momentum_fluxes_extrapolating_diurnal[i]], color=color_palette[3]) for i in 1:length(extrapolating_data_diurnal)]
extrapolating_points = CairoMakie.scatter!(ax, buoyancy_fluxes_extrapolating, momentum_fluxes_extrapolating, color=color_palette[3])
# diurnal_line_1 = CairoMakie.lines!(ax, [-5.5e-8, 5.5e-8], [-1.5e-4, -1.5e-4], color=color_palette[4])
# diurnal_line_2 = CairoMakie.lines!(ax, [-5.5e-8, 5.5e-8], [-5.5e-4, -5.5e-4], color=color_palette[4])
# # diurnal_points = CairoMakie.scatter!(ax, buoyancy_fluxes_diurnal, momentum_fluxes_diurnal, color=color_palette[4])
legend = fig[2,1] = CairoMakie.Legend(fig, [training_lines[1], interpolating_lines[1], extrapolating_lines[1], extrapolating_points, rectangle],
["Training", "Interpolating", "Extrapolating", "Constant Fluxes", "Interpolation Region"], orientation=:horizontal)
rowsize!(fig.layout, 1, CairoMakie.Relative(0.95))
trim!(fig.layout)
fig
save("final_results/data_diurnal.png", fig, px_per_unit = 4)
T_loss(data) = sum(data) / 1153
losses_training = []
for train_file in train_files
file = jldopen("final_results/3sim_diurnal/train_$(train_file)/profiles_fluxes_oceananigans.jld2")
data = file["NDE_profile"]
close(file)
loss = T_loss(data["T_losses"])
loss_mpp = T_loss(data["T_losses_modified_pacanowski_philander"])
loss_kpp = T_loss(data["T_losses_kpp"])
loss_min = argmin([loss, loss_mpp, loss_kpp])
loss_str = ["NDE", "mpp", "kpp"]
push!(losses_training, loss_str[loss_min])
end
losses_training
losses_interpolating = []
for interpolating_file in interpolating_files
file = jldopen("final_results/3sim_diurnal/test_$(interpolating_file)/profiles_fluxes_oceananigans.jld2")
data = file["NDE_profile"]
close(file)
loss = T_loss(data["T_losses"])
loss_mpp = T_loss(data["T_losses_modified_pacanowski_philander"])
loss_kpp = T_loss(data["T_losses_kpp"])
@show loss, loss_mpp, loss_kpp
loss_min = argmin([loss, loss_mpp, loss_kpp])
loss_str = ["NDE", "mpp", "kpp"]
push!(losses_interpolating, loss_str[loss_min])
end
losses_interpolating
losses_extrapolating = []
for extrapolating_file in extrapolating_files
file = jldopen("final_results/3sim_diurnal/test_$(extrapolating_file)/profiles_fluxes_oceananigans.jld2")
data = file["NDE_profile"]
close(file)
loss = T_loss(data["T_losses"])
loss_mpp = T_loss(data["T_losses_modified_pacanowski_philander"])
loss_kpp = T_loss(data["T_losses_kpp"])
@show loss, loss_mpp, loss_kpp
loss_min = argmin([loss, loss_mpp, loss_kpp])
loss_str = ["NDE", "mpp", "kpp"]
push!(losses_extrapolating, loss_str[loss_min])
end
losses_extrapolating
losses_extrapolating_diurnal = []
for diurnal_file in extrapolating_diurnal_files
file = jldopen("final_results/3sim_diurnal/test_$(diurnal_file)/profiles_fluxes_oceananigans.jld2")
data = file["NDE_profile"]
close(file)
loss = T_loss(data["T_losses"])
loss_mpp = T_loss(data["T_losses_modified_pacanowski_philander"])
loss_kpp = T_loss(data["T_losses_kpp"])
@show loss, loss_mpp, loss_kpp
loss_min = argmin([loss, loss_mpp, loss_kpp])
loss_str = ["NDE", "mpp", "kpp"]
push!(losses_extrapolating_diurnal, loss_str[loss_min])
end
losses_extrapolating_diurnal
fig = CairoMakie.Figure(resolution=(1000, 650))
ax = fig[1,1] = CairoMakie.Axis(fig, xlabel="Buoyancy Flux / m² s⁻³", ylabel="Momentum Flux / m² s⁻²")
color_palette = distinguishable_colors(4, [RGB(1,1,1), RGB(0,0,0)], dropseed=true)
loss_colors = Dict(
"NDE" => color_palette[1],
"mpp" => color_palette[3],
"kpp" => color_palette[4],
)
rectangle = CairoMakie.poly!(ax, Point2f0[(-5e-8, -2e-4), (-5e-8, -5e-4-3f-6), (5e-8, -5e-4-3f-6), (5e-8, -2e-4)], color=("paleturquoise3", 0.5))
NDE_line = CairoMakie.lines!(ax, [buoyancy_fluxes_training[1], buoyancy_fluxes_training[1]], [momentum_fluxes_training[1], momentum_fluxes_training[1]], color=loss_colors["NDE"])
mpp_line = CairoMakie.lines!(ax, [buoyancy_fluxes_training[1], buoyancy_fluxes_training[1]], [momentum_fluxes_training[1], momentum_fluxes_training[1]], color=loss_colors["mpp"])
kpp_line = CairoMakie.lines!(ax, [buoyancy_fluxes_training[1], buoyancy_fluxes_training[1]], [momentum_fluxes_training[1], momentum_fluxes_training[1]], color=loss_colors["kpp"])
training_lines = [CairoMakie.lines!(ax, [-buoyancy_fluxes_training[i], buoyancy_fluxes_training[i]],
[momentum_fluxes_training[i], momentum_fluxes_training[i]], color=loss_colors[losses_training[i]]) for i in 1:length(losses_training)]
interpolating_lines = [CairoMakie.lines!(ax, [-buoyancy_fluxes_interpolating[i], buoyancy_fluxes_interpolating[i]],
[momentum_fluxes_interpolating[i], momentum_fluxes_interpolating[i]], color=loss_colors[losses_interpolating[i]]) for i in 1:length(losses_interpolating)]
extrapolating_lines = [CairoMakie.lines!(ax, [-buoyancy_fluxes_extrapolating_diurnal[i], buoyancy_fluxes_extrapolating_diurnal[i]],
[momentum_fluxes_extrapolating_diurnal[i], momentum_fluxes_extrapolating_diurnal[i]], color=loss_colors[losses_extrapolating_diurnal[i]]) for i in 1:length(losses_extrapolating_diurnal)]
extrapolating_points = [CairoMakie.scatter!(ax, [buoyancy_fluxes_extrapolating[i]], [momentum_fluxes_extrapolating[i]], color=loss_colors[losses_extrapolating[i]]) for i in 1:length(losses_extrapolating)]
# training_points = [CairoMakie.scatter!(ax, [buoyancy_fluxes_training[i]], [momentum_fluxes_training[i]], color=loss_colors[losses_training[i]]) for i in 1:length(losses_training)]
# interpolating_points = [CairoMakie.scatter!(ax, [buoyancy_fluxes_interpolating[i]], [momentum_fluxes_interpolating[i]], color=loss_colors[losses_interpolating[i]]) for i in 1:length(losses_interpolating)]
# extrapolating_points = [CairoMakie.scatter!(ax, [buoyancy_fluxes_extrapolating[i]], [momentum_fluxes_extrapolating[i]], color=loss_colors[losses_extrapolating[i]]) for i in 1:length(losses_extrapolating)]
# # extrapolating_points = CairoMakie.scatter!(ax, buoyancy_fluxes_extrapolating, momentum_fluxes_extrapolating, color=color_palette[3])
# diurnal_line_1 = CairoMakie.lines!(ax, [-5.5e-8, 5.5e-8], [-5.5e-4, -5.5e-4], color=loss_colors[losses_diurnal[1]])
# diurnal_line_2 = CairoMakie.lines!(ax, [-5.5e-8, 5.5e-8], [-1.5e-4, -1.5e-4], color=loss_colors[losses_diurnal[2]])
legend = fig[2,1] = CairoMakie.Legend(fig, [mpp_line, kpp_line, NDE_line],
["Ri-based Diffusivity Only", "K-Profile Parameterisation", "NN Embedded in Oceananigans.jl"], orientation=:horizontal)
rowsize!(fig.layout, 1, CairoMakie.Relative(0.95))
trim!(fig.layout)
fig
save("final_results/data_diurnal_loss_results.png", fig, px_per_unit = 4) |
proofpile-julia0005-42460 | {
"provenance": "014.jsonl.gz:242461"
} | # using Pipe
using Test
_macroexpand(q) = macroexpand(Main, q)
rml! = Base.remove_linenums!
# performs linenum removal and temp variable replacing to avoid different names of temp variables in different julia versions
stringify_expr(e::Expr) = replace(string(rml!(e)), r"##\d{3}" => "##000")
pipe_equals(e1::Expr, e2::Expr) = stringify_expr(_macroexpand(e1)) == stringify_expr(e2)
@testset "`_` 1st location" begin
@test _macroexpand(:(@pipe a |> b(x))) == :((b(a, x))) #applying to function calls returning functions
@test _macroexpand(:(@pipe a |> bb[2])) == :(bb[a, 2]) #Should work with RHS that is a array reference
@test _macroexpand(:(@pipe a |> b(xb, _) |> c |> d(_, xd) |> e(xe) |> f(xf, _, yf) |> _[i])) == :((f(xf, e(d(c(b(xb, a)), xd), xe), yf))[i])
end
#Marked locations
@testset "Pipe main" begin
#No change to nonpipes functionality
@test _macroexpand(:(@pipe a)) == :a #doesn't change single inputs
@test _macroexpand(:(@pipe b(a))) == :(b(a)) #doesn't change inputs that are function applications
#Compatable with Julia 1.3 piping functionality
@test _macroexpand(:(@pipe a |> b)) == :(b(a)) #basic
@test _macroexpand(:(@pipe a |> b |> c)) == :(c(b(a))) #Keeps chaining 3
@test _macroexpand(:(@pipe a |> b |> c |> d)) == :(d(c(b(a)))) #Keeps chaining 4
# @test _macroexpand( :(@pipe a|>b(x)) ) == :((b(x))(a)) #applying to function calls returning functions
@test _macroexpand(:(@pipe a(x) |> b)) == :(b(a(x))) #feeding functioncall results on wards
@test _macroexpand(:(@pipe 1 |> a)) == :(a(1)) #Works with literals (int)
@test _macroexpand(:(@pipe "foo" |> a)) == :(a("foo")) #Works with literal (string)
# @test _macroexpand( :(@pipe a|>bb[2])) == :((bb[2])(a)) #Should work with RHS that is a array reference
@test _macroexpand(:(@pipe a |> _)) == :(a) #Identity works
@test _macroexpand(:(@pipe a |> _[b])) == :(a[b]) #Indexing works
@test _macroexpand(:(@pipe a |> b(_))) == :(b(a)) #Marked location only
@test _macroexpand(:(@pipe a |> b(x, _))) == :(b(x, a)) # marked 2nd (and last)
@test _macroexpand(:(@pipe a |> b(_, x))) == :(b(a, x)) # marked first
@test _macroexpand(:(@pipe a |> b(_, _))) == :(b(a, a)) # marked double (Not certain if this is a good idea)
@test _macroexpand(:(@pipe a |> bb[2](x, _))) == :((bb[2])(x, a)) #Should work with RHS that is a array reference
end
#Macros and blocks
macro testmacro(arg, n)
esc(:($arg + $n))
end
@test _macroexpand(:(@pipe a |> @testmacro _ 3)) == :(a + 3) # Can pipe into macros
@test _macroexpand(:(@pipe a |> begin
b = _
c + b + _
end)) == :(
begin
b = a
c + b + a
end)
#marked Unpacking
@test _macroexpand(:(@pipe a |> b(_...))) == :(b(a...)) # Unpacking
@test _macroexpand(:(@pipe a |> bb[2](_...))) == :((bb[2])(a...)) #Should work with RHS of arry ref and do unpacking
#Mixing modes
@test _macroexpand(:(@pipe a |> b |> c(_))) == :(c(b(a)))
@test _macroexpand(:(@pipe a |> b(x, _) |> c |> d(_, y))) == :(d(c(b(x, a)), y))
# @test _macroexpand( :(@pipe a|>b(xb,_)|>c|>d(_,xd)|>e(xe) |>f(xf,_,yf)|>_[i] ) ) == :(f(xf,(e(xe))(d(c(b(xb,a)),xd)),yf)[i]) #Very Complex
# broadcasting
@testset "Pipe broadcasting" begin
vars = 1:10 .|> y -> gensym() # Julia < 1.3 changes how Symbols are stringified so we compute the representation here
@test pipe_equals(:(@pipe 1:10 .|> _ * 2), :(1:10 .|> $(vars[1]) -> $(vars[1]) * 2))
@test pipe_equals(:(@pipe 1:10 .|> fn), :(1:10 .|> $(vars[2]) -> fn($(vars[2]))))
@test pipe_equals(:(@pipe a .|> fn .|> _ * 2), :(a .|> ($(vars[3]) -> fn($(vars[3]))) .|> ($(vars[4]) -> $(vars[4]) * 2)))
@test pipe_equals(:(@pipe a .|> fn |> _ * 2), :((a .|> $(vars[5]) -> fn($(vars[5]))) * 2))
@test pipe_equals(:(@pipe [1, 2, 2] |> atan.([10, 20, 30], _)), :(atan.([10, 20, 30], [1, 2, 2])))
@test pipe_equals(:(@pipe [1, 2, 2] .|> atan.([10, 20, 30], _)), :([1, 2, 2] .|> $(vars[6]) -> atan.([10, 20, 30], $(vars[6]))))
@test pipe_equals(:(@pipe fn |> _.(1:2)), :(fn.(1:2)))
@test pipe_equals(:(@pipe fn .|> _.(1:2)), :(fn .|> $(vars[7]) -> $(vars[7]).(1:2)))
@test pipe_equals(:(@pipe [true, false] .|> !), :([true, false] .|> $(vars[8]) -> !$(vars[8])))
@test pipe_equals(:(@pipe [1, 2] |> .+(_, x)), :([1, 2] .+ x))
@test pipe_equals(:(@pipe [1, 2] |> _ .+ x), :([1, 2] .+ x))
@test pipe_equals(:(@pipe [1, 2] .|> .+(_, x)), :([1, 2] .|> $(vars[9]) -> $(vars[9]) .+ x))
@test pipe_equals(:(@pipe [1, 2] .|> _ .+ x), :([1, 2] .|> $(vars[10]) -> $(vars[10]) .+ x))
end
|
proofpile-julia0005-42461 | {
"provenance": "014.jsonl.gz:242462"
using ClusterTrees, BEAST, CompScienceMeshes
using LinearAlgebra # for norm
a, h = 1.0, 0.25
Γ1 = meshsphere(a, h)
Γ2 = CompScienceMeshes.translate(Γ1, point(2.1,0,0))
X1 = lagrangecxd0(Γ1)
X2 = lagrangecxd0(Γ2)
p = positions(X1)
q = positions(X2)
p, tp, permp = clustertree(p)
q, tq, permq = clustertree(q)
function adm(b) # p, q, and η are passed in implicitly as globals
nmin = 20
I = b[1][1].begin_idx : b[1][1].end_idx-1
J = b[2][1].begin_idx : b[2][1].end_idx-1
length(I) < nmin && return true
length(J) < nmin && return true
ll1, ur1 = ClusterTrees.boundingbox(p[I]); c1 = (ll1+ur1)/2;
ll2, ur2 = ClusterTrees.boundingbox(q[J]); c2 = (ll2+ur2)/2;
diam1 = norm(ur1-c1)
diam2 = norm(ur2-c2)
dist12 = norm(c2-c1)
return dist12 >= η*max(diam1, diam2)
end
blocktree, η = (tp,tq), 2.0
P = admissable_partition(blocktree, adm)
# sanity check on the tree
for (τ,σ) in P
tree_size = 0
depthfirst(τ) do c,l
tree_size += 1
end
@assert τ[1].num_children+1 == tree_size
end
depthfirst(tp) do τ,l
I = τ[1].begin_idx : τ[1].end_idx-1
for c in children(τ)
J = c[1].begin_idx : c[1].end_idx-1
@assert J ⊆ I
end
end
# gather all the observer clusters that participate in an interaction pair
Q = Vector{typeof(P[1][1])}()
for (i,p) in enumerate(P)
τ1, _ = p[1], p[2]
I = τ1[1].begin_idx : τ1[1].end_idx-1
descendant = false
for q in P
τ2, _ = q[1], q[2]
J = τ2[1].begin_idx : τ2[1].end_idx-1
# Can τ2 be reached from τ1?
descendant = ((J != I) && (J ⊆ I))
!isempty(intersect(I,J)) && !(J ⊆ I) && !(I ⊆ J) && (@show I J)
descendant && break
end
!descendant && push!(Q,τ1)
end
Q = unique(Q)
length(Q)
length(P)
balance = zeros(numfunctions(X1))
for q in Q
I = q[1].begin_idx : q[1].end_idx-1
balance[permp[I]] .+= 1
end
@show extrema(balance)
using MATLAB
mat"figure()"
mat"hold("on")"
V = [v[i] for v in Γ1.vertices, i in 1:3]
F = [f[i] for f in Γ1.faces, i in 1:3]
@mput V F
for (r,q) in enumerate(Q)
I = q[1].begin_idx : q[1].end_idx-1
I = permp[I]
@mput I r
mat"patch("Vertices",V,"Faces",F[I,:],"FaceColor",rand(3,1))"
end
## Another sanity check: everything can be reached from the root
for (i,τ) in enumerate(tp)
reached = false
depthfirst(tp) do st,l
st[1] == τ && (reached = true)
end
if !reached
@show τ
@show i
error()
end
end
|
proofpile-julia0005-42462 | {
"provenance": "014.jsonl.gz:242463"
} | c1 = Patience(1)
c2 = InvalidValue()
c3 = TimeLimit(t=100)
@test Disjunction(c1) == c1
d = c1 + c2 + Never() + c3 + c1
show(d)
# codecov:
@test zero(typeof(c1)) == Never()
@test sum(Disjunction[]) == Never()
@test Never() in c1
@testset "_criteria" begin
criteria = EarlyStopping._criteria(d)
@test length(criteria) == 3
@test issubset([c1, c2, c3], criteria)
end
@testset "stoppping times" begin
d2 = Patience(3) + InvalidValue()
@test stopping_time(d2, [12.0, 10.0, 11.0, 12.0, 13.0, NaN]) == 5
@test stopping_time(d2, [NaN, 12.0, 10.0, 11.0, 12.0, 13.0]) == 1
end
@testset "message" begin
state = EarlyStopping.update(d, NaN)
@test EarlyStopping.message(d, state) ==
"Stopping early as `NaN`, "*
"`Inf` or `-Inf` encountered. "
end
state = EarlyStopping.update(d, 1.0)
state = EarlyStopping.update(d, 2.0, state)
@test EarlyStopping.message(d, state) ==
"Stop triggered by Patience(1) stopping criterion. "
|
proofpile-julia0005-42463 | {
"provenance": "014.jsonl.gz:242464"
} | export FT_Init_FreeType
export FT_Done_FreeType
export FT_New_Face
export FT_New_Memory_Face
export FT_Open_Face
export FT_Attach_File
export FT_Attach_Stream
export FT_Reference_Face
export FT_Done_Face
export FT_Select_Size
export FT_Request_Size
export FT_Set_Char_Size
export FT_Set_Pixel_Sizes
export FT_Load_Glyph
export FT_Load_Char
export FT_Set_Transform
export FT_Render_Glyph
export FT_Get_Kerning
export FT_Get_Track_Kerning
export FT_Get_Glyph_Name
export FT_Get_Postscript_Name
export FT_Select_Charmap
export FT_Set_Charmap
export FT_Get_Charmap_Index
export FT_Get_Char_Index
export FT_Get_First_Char
export FT_Get_Next_Char
export FT_Get_Name_Index
export FT_Get_SubGlyph_Info
export FT_Get_FSType_Flags
export FT_Face_GetCharVariantIndex
export FT_Face_GetCharVariantIsDefault
export FT_Face_GetVariantSelectors
export FT_Face_GetVariantsOfChar
export FT_Face_GetCharsOfVariant
export FT_MulDiv
export FT_DivFix
export FT_RoundFix
export FT_CeilFix
export FT_FloorFix
export FT_Vector_Transform
export FT_Library_Version
export FT_Face_CheckTrueTypePatents
export FT_Face_SetUnpatentedHinting
export FT_Outline_Decompose
export FT_Int16
export FT_UInt16
export FT_Int32
export FT_UInt32
export FT_Fast
export FT_UFast
export FT_Memory
export FT_Alloc_Func
export FT_Free_Func
export FT_Realloc_Func
export FT_Stream
export FT_Stream_IoFunc
export FT_Stream_CloseFunc
export FT_Pos
export FT_Pixel_Mode
export FT_Outline_MoveToFunc
export FT_Outline_LineToFunc
export FT_Outline_ConicToFunc
export FT_Outline_CubicToFunc
export FT_Glyph_Format
export FT_Raster
export FT_SpanFunc
export FT_Raster_BitTest_Func
export FT_Raster_BitSet_Func
export FT_Raster_NewFunc
export FT_Raster_DoneFunc
export FT_Raster_ResetFunc
export FT_Raster_SetModeFunc
export FT_Raster_RenderFunc
export FT_Bool
export FT_FWord
export FT_UFWord
export FT_Char
export FT_Byte
export FT_Bytes
export FT_Tag
export FT_String
export FT_Short
export FT_UShort
export FT_Int
export FT_UInt
export FT_Long
export FT_ULong
export FT_F2Dot14
export FT_F26Dot6
export FT_Fixed
export FT_Error
export FT_Pointer
export FT_Offset
export FT_PtrDist
export FT_Generic_Finalizer
export FT_ListNode
export FT_List
export FT_Library
export FT_Module
export FT_Driver
export FT_Renderer
export FT_Face
export FT_Size
export FT_GlyphSlot
export FT_CharMap
export FT_Encoding
export FT_Face_Internal
export FT_Size_Internal
export FT_SubGlyph
export FT_Slot_Internal
export FT_Size_Request_Type
export FT_Size_Request
export FT_Render_Mode
export FT_Kerning_Mode
export FT_LOAD_DEFAULT
export FT_LOAD_TARGET_NORMAL
export FT_LOAD_NO_SCALE
export FT_LOAD_NO_HINTING
export FT_LOAD_RENDER
export FT_LOAD_NO_BITMAP
export FT_LOAD_VERTICAL_LAYOUT
export FT_LOAD_FORCE_AUTOHINT
export FT_LOAD_CROP_BITMAP
export FT_LOAD_PEDANTIC
export FT_LOAD_ADVANCE_ONLY
export FT_LOAD_IGNORE_GLOBAL_ADVANCE_WIDTH
export FT_LOAD_NO_RECURSE
export FT_LOAD_IGNORE_TRANSFORM
export FT_LOAD_MONOCHROME
export FT_LOAD_LINEAR_DESIGN
export FT_LOAD_SBITS_ONLY
export FT_LOAD_NO_AUTOHINT
export FT_LOAD_TARGET_LIGHT
export FT_LOAD_TARGET_MONO
export FT_LOAD_TARGET_LCD
export FT_LOAD_TARGET_LCD_V
export FT_LOAD_COLOR
export FT_FACE_FLAG_SCALABLE
export FT_FACE_FLAG_FIXED_SIZES
export FT_FACE_FLAG_FIXED_WIDTH
export FT_FACE_FLAG_SFNT
export FT_FACE_FLAG_HORIZONTAL
export FT_FACE_FLAG_VERTICAL
export FT_FACE_FLAG_KERNING
export FT_FACE_FLAG_FAST_GLYPHS
export FT_FACE_FLAG_MULTIPLE_MASTERS
export FT_FACE_FLAG_GLYPH_NAMES
export FT_FACE_FLAG_EXTERNAL_STREAM
export FT_FACE_FLAG_HINTER
export FT_FACE_FLAG_CID_KEYED
export FT_FACE_FLAG_TRICKY
export FT_FACE_FLAG_COLOR
export FT_RENDER_MODE_NORMAL
export FT_RENDER_MODE_LIGHT
export FT_RENDER_MODE_MONO
export FT_RENDER_MODE_LCD
export FT_RENDER_MODE_LCD_V
export FT_RENDER_MODE_MAX
export FT_PIXEL_MODE_NONE
export FT_PIXEL_MODE_MONO
export FT_PIXEL_MODE_GRAY
export FT_PIXEL_MODE_GRAY2
export FT_PIXEL_MODE_GRAY4
export FT_PIXEL_MODE_LCD
export FT_PIXEL_MODE_LCD_V
export FT_PIXEL_MODE_BGRA
export FT_PIXEL_MODE_MAX
|
proofpile-julia0005-42464 | {
"provenance": "014.jsonl.gz:242465"
} | module GatedLinearNetworks
export GaussianGGLN, train!, predict
using Flux
using Statistics
"""
Normalize input using an online estimate of the mean and variance, computed
with Welford's algorithm.
"""
struct NormalizationLayer{VF <: AbstractVector{<:Real}, VI <: AbstractVector{Int}}
count::VI # [1]
mean::VF # [data_dim]
m2::VF # [data_dim]
end
function NormalizationLayer(n::Int)
return NormalizationLayer(
Int[0],
zeros(Float32, n),
zeros(Float32, n))
end
"""
Standardize the input, updating the running statistics if `training` is true.
This resembles batch normalization, but uses cumulative rather than per-batch statistics.
x is [data_dim, batch_dim]
"""
function forward(layer::NormalizationLayer, x::AbstractMatrix, training::Bool)
if training
for j in 1:size(x, 2)
layer.count .+= 1
xj = x[:,j]
delta = xj - layer.mean
layer.mean .+= delta ./ layer.count
delta2 = xj - layer.mean
layer.m2 .+= delta .* delta2
end
end
@assert layer.count[1] > 0
sd = sqrt.(layer.m2 ./ layer.count)
return (x .- layer.mean) ./ sd
end
function inverse(layer::NormalizationLayer, z::AbstractArray)
sd = sqrt.(layer.m2 ./ layer.count)
return (z .* sd) .+ layer.mean
end
function inverse(layer::NormalizationLayer, μ::AbstractArray, σ2::AbstractArray)
sd = sqrt.(layer.m2 ./ layer.count)
μ_inv = (μ .* sd) .+ layer.mean
σ2_inv = σ2 .* sd.^2
return μ_inv, σ2_inv
end
abstract type GaussianWeakLearner end
"""
Bayesian gaussian linear regression with known precision. Used as a weak
learner input to the GGLN.
"""
struct BasicGaussianLinearRegression{
VF <: AbstractVector{<:Real},
MF <: AbstractMatrix{<:Real},
VI <: AbstractVector{Int}} <: GaussianWeakLearner
# accumulated sufficient statistics
xy::MF # [predictor_dim, prediction_dim]
xx::VF # [predictor_dim]
x::VF # [predictor_dim]
y::VF # [prediction_dim]
count::VI # [1]
# known prior and observation precisions
τ0::VF # [1]
τ::VF # [1]
end
"""
Construct a bayesian linear regression.
* `predictor_dim`: dimensionality of predictors
* `prediction_dim`: dimensionality of predictions
"""
function BasicGaussianLinearRegression(predictor_dim::Int, prediction_dim::Int)
return BasicGaussianLinearRegression(
zeros(Float32, (predictor_dim, prediction_dim)),
zeros(Float32, predictor_dim),
zeros(Float32, predictor_dim),
zeros(Float32, prediction_dim),
zeros(Int, 1),
Float32[1f0],
Float32[1f0])
end
"""
Forward pass for the linear regression. Train when `y` is given, otherwise
only output posterior predictive parameters.
"""
function forward(
layer::BasicGaussianLinearRegression,
x::AbstractMatrix, y::Union{Nothing, AbstractMatrix})
predictor_dim = size(layer.xy, 1)
prediction_dim = size(layer.xy, 2)
batch_dim = size(x, 2)
@assert size(x, 1) == predictor_dim
@assert y === nothing || size(y, 1) == prediction_dim
@assert y === nothing || size(y, 2) == batch_dim
# [predictor_dim]
c1 = (layer.τ0 .+ layer.τ .* layer.count) .*
(layer.τ0 .+ layer.τ .* layer.xx) .- (layer.τ .* layer.x).^2
# [predictor_dim]
c2 = layer.τ ./ c1
# [predictor_dim, prediction_dim]
uw = (layer.τ0 .+ layer.τ .* layer.count) .* layer.xy
vw = layer.τ .* reshape(layer.x, (predictor_dim, 1)) .*
reshape(layer.y, (1, prediction_dim))
# [predictor_dim, prediction_dim]
μ_w = c2 .* (uw .- vw)
# [predictor_dim, prediction_dim]
ub = (layer.τ0 .+ layer.τ .* reshape(layer.xx, (predictor_dim, 1))) .*
reshape(layer.y, (1, prediction_dim))
vb = layer.τ .* layer.x .* layer.xy
# [predictor_dim, prediction_dim]
μ_b = c2 .* (ub .- vb)
# [predictor_dim, batch_dim]
σ2 = inv.(layer.τ) .+ inv.(c1) .* (
(layer.τ0 .+ layer.τ .* layer.count) .* x.^2 .-
2f0 .* layer.τ .* x .* layer.x .+
layer.τ0 .+
layer.τ .* layer.xx)
# repeat this across the prediction_dim, since this is an isotropic gaussian
# [predictor_dim, prediction_dim, batch_dim]
σ2 = repeat(reshape(σ2, (predictor_dim, 1, batch_dim)), 1, prediction_dim, 1)
# [predictor_dim, prediction_dim, batch_dim]
μ = reshape(x, (predictor_dim, 1, batch_dim)) .*
reshape(μ_w, (predictor_dim, prediction_dim, 1)) .+
reshape(μ_b, (predictor_dim, prediction_dim, 1))
if y !== nothing
layer.xx .+= dropdims(sum(x.*x, dims=2), dims=2)
layer.xy .+=
dropdims(sum(reshape(x, (predictor_dim, 1, batch_dim)) .*
reshape(y, (1, prediction_dim, batch_dim)), dims=3), dims=3)
layer.x .+= dropdims(sum(x, dims=2), dims=2)
layer.y .+= dropdims(sum(y, dims=2), dims=2)
layer.count .+= size(x, 2)
end
# [predictor_dim, prediction_dim, batch_dim]
return μ, σ2
end
"""
Basically MinHash.
"""
struct LocalitySensitiveHash{
MF <: AbstractMatrix{<:Real},
VF <: AbstractVector{<:Real},
VI <: AbstractVector{Int32}}
output_dim::Int
context_dim::Int
# context function parameters
hyperplanes::MF
hyperplanes_bias::VF
# used to compute weight indexes
bit_offsets::VI
k_offsets::VI
end
"""
Construct a locality sensitive hash function.
* `output_dim`: number of outputs (i.e. number of units in this layer)
* `context_dim`: number of context functions (inducing 2^context_dim weight vectors)
* `predictor_dim`: dimensionality of predictors
"""
function LocalitySensitiveHash(output_dim::Int, context_dim::Int, predictor_dim::Int)
hyperplanes = randn(Float32, (output_dim*context_dim, predictor_dim))
hyperplanes_bias = randn(Float32, (output_dim*context_dim))
bit_offsets = collect(Int32, 0:context_dim-1)
# give each unit k its own disjoint block of 2^context_dim weight columns,
# matching the (input_dim, output_dim * 2^context_dim) weights allocation
k_offsets = collect(Int32, 0:output_dim-1) .* Int32(2^context_dim) .+ Int32(1)
return LocalitySensitiveHash(
output_dim, context_dim, hyperplanes, hyperplanes_bias, bit_offsets, k_offsets)
end
"""
Apply the context functions for this layer, mapping (standardized) input vectors
to indexes in [1, 2^context_dim].
"""
function (lsh::LocalitySensitiveHash)(X::AbstractMatrix)
bits = (lsh.hyperplanes * X) .> lsh.hyperplanes_bias
batch_dim = size(X, 2)
bits_reshape = reshape(bits, (lsh.context_dim, lsh.output_dim, batch_dim))
cs = dropdims(sum(bits_reshape .<< lsh.bit_offsets, dims=1), dims=1) .+ lsh.k_offsets
return cs
end
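# Usage sketch: with 4 units and 3 context functions over 2-D predictors, each
# input column hashes to 4 weight-column indices (one per unit), each selecting
# one of 2^3 = 8 contexts for that unit:
#
#   lsh = LocalitySensitiveHash(4, 3, 2)
#   cs = lsh(randn(Float32, 2, 5))  # 4×5 matrix of weight-column indices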
"""
A single GGLN layer with arbitrary number of units.
"""
struct GGLNLayer{
MF <: AbstractMatrix{<:Real},
VF <: AbstractVector{<:Real},
VI <: AbstractVector{Int32}}
input_dim::Int
output_dim::Int
context_dim::Int
predictor_dim::Int
prediction_dim::Int
learning_rate::Float64
lsh::LocalitySensitiveHash{MF, VF, VI}
weights::MF
end
"""
Construct a single GGLN layer.
* `input_dim`: number of inputs (i.e. number of units in the prev layer)
* `output_dim`: number of outputs (i.e. number of units in this layer)
* `context_dim`: number of context functions (inducing 2^context_dim weight vectors)
* `predictor_dim`: dimensionality of predictors
* `prediction_dim`: dimensionality of predictions
* `learning_rate`: controls step size
"""
function GGLNLayer(
input_dim::Int, output_dim::Int,
context_dim::Int, predictor_dim::Int, prediction_dim::Int,
learning_rate::Float64=1e-2)
lsh = LocalitySensitiveHash(output_dim, context_dim, predictor_dim)
weights = fill(log(1f0/input_dim), (input_dim, output_dim*(2^context_dim)))
return GGLNLayer(
input_dim,
output_dim,
context_dim,
predictor_dim,
prediction_dim,
learning_rate,
lsh,
weights)
end
"""
Forward pass for a single GGLN layer, training if `y` is given, predicting if not.
input_μ: [input_dim, prediction_dim, batch_dim]
input_σ2: [input_dim, prediction_dim, batch_dim]
z: [predictor_dim, batch_dim]
y: [prediction_dim, batch_dim]
"""
function forward(
layer::GGLNLayer, input_μ::AbstractArray, input_σ2::AbstractArray,
z::AbstractMatrix, y::Union{Nothing, AbstractMatrix})
# input_μ, input_σ should be [input_dim, batch_dim]
batch_dim = size(input_μ, 3)
# [output_dim, batch_dim]
cs = layer.lsh(z)
function predict(weights)
# [input_dim, output_dim, 1, batch_dim]
weights_cs = weights[:,cs]
penalty = 1f-5 * sum(weights_cs.^2)
weights_ = exp.(reshape(
weights_cs, (layer.input_dim, layer.output_dim, 1, batch_dim)))
# [input_dim, 1, prediction_dim, batch_dim]
input_μ_ = reshape(input_μ, (layer.input_dim, 1, layer.prediction_dim, batch_dim))
input_σ2_ = reshape(input_σ2, (layer.input_dim, 1, layer.prediction_dim, batch_dim))
# [output_dim, prediction_dim, batch_dim]
σ2 = inv.(dropdims(sum(weights_ ./ input_σ2_, dims=1), dims=1))
# [output_dim, prediction_dim, batch_dim]
μ = σ2 .* dropdims(sum(weights_ .* input_μ_ ./ input_σ2_, dims=1), dims=1)
return μ, σ2, penalty
end
if y !== nothing
function loss(weights)
# [output_dim, prediction_dim, batch_dim]
μ, σ2, penalty = predict(weights)
σ = sqrt.(σ2)
y_ = reshape(y, (1, layer.prediction_dim, batch_dim))
# negative normal log-pdf
neg_ll = -sum(.- log.(σ) .- 0.5 * log.(2*π) .- 0.5 .* ((y_ .- μ) ./ σ).^2)
return neg_ll + penalty
# return neg_ll
end
dloss_dweights = gradient(loss, layer.weights)[1]
layer.weights .-= layer.learning_rate .* dloss_dweights
clamp!(layer.weights, -10f0, 10f0)
end
return predict(layer.weights)
end
"""
Complete GGLN model.
"""
struct GaussianGGLN{AF <: AbstractArray{<:Real,3}}
x_norm::NormalizationLayer
y_norm::NormalizationLayer
weak_learner::BasicGaussianLinearRegression
layers::Vector{GGLNLayer}
# [bias_dim, prediction_dim, 1]
μ_bias::AF
σ2_bias::AF
end
"""
Construct an untrained gaussian gated linear network.
* `predictor_dim`: dimensionality of predictors (x)
* `prediction_dim`: dimensionality of predictions (y)
* `num_layers`: number of layers in the model
* `layer_width`: width of each layer in the model
* `context_dim`: number of context functions (inducing 2^context_dim weight vectors)
* `bias`: values for `bias` units.
"""
function GaussianGGLN(
predictor_dim::Int, prediction_dim::Int,
num_layers::Int, layer_width::Int, context_dim::Int,
bias::Float32=5.0f0)
bias_dim = 2*prediction_dim
layers = GGLNLayer[]
if num_layers > 0
push!(
layers, GGLNLayer(
predictor_dim + bias_dim, layer_width, context_dim,
predictor_dim, prediction_dim))
end
for i in 1:num_layers-1
push!(
layers, GGLNLayer(
layer_width + bias_dim, layer_width, context_dim,
predictor_dim, prediction_dim))
end
# compute the bias arrays
μ_bias = zeros(Float32, (bias_dim, prediction_dim, 1))
# We want a bias unit for +/- bias for every prediction dimension.
for i in 1:prediction_dim
μ_bias[i, i, 1] = -bias
μ_bias[prediction_dim+i, i, 1] = bias
end
σ2_bias = ones(Float32, (bias_dim, prediction_dim, 1))
return GaussianGGLN(
NormalizationLayer(predictor_dim),
NormalizationLayer(prediction_dim),
BasicGaussianLinearRegression(predictor_dim, prediction_dim),
layers, μ_bias, σ2_bias)
end
"""
Forward pass for the GGLN, training if y is given, predicting otherwise.
"""
function forward(ggln::GaussianGGLN, x::AbstractMatrix, y::Union{Nothing, AbstractMatrix})
batch_dim = size(x, 2)
@assert y === nothing || size(x, 2) == size(y, 2)
@assert y === nothing || size(y, 1) == size(ggln.μ_bias, 2)
xz = forward(ggln.x_norm, x, y !== nothing)
yz = y === nothing ? nothing : forward(ggln.y_norm, y, true)
μ, σ2 = forward(ggln.weak_learner, xz, yz)
for layer in ggln.layers
# concatenate bias inputs
μ = cat(repeat(ggln.μ_bias, 1, 1, batch_dim), μ, dims=1)
σ2 = cat(repeat(ggln.σ2_bias, 1, 1, batch_dim), σ2, dims=1)
# [output_dim, prediction_dim, batch_dim]
μ, σ2 = forward(layer, μ, σ2, xz, yz)
end
return inverse(
ggln.y_norm,
dropdims(mean(μ, dims=1), dims=1),
dropdims(mean(σ2, dims=1), dims=1))
end
"""
Train the model one step using a the batch of predictors `x` and predictions
`y`.
`x` should have shape [predictors dimension, batch size]
`y` should have shape [predictions dimension, batch size]
"""
function train!(ggln::GaussianGGLN, x::AbstractMatrix, y::AbstractMatrix)
return forward(ggln, x, y)
end
"""
Make predictions using a trained model for predictors `x`.
`x` should have shape [predictors dimension, batch size]
"""
function predict(ggln::GaussianGGLN, x::AbstractMatrix)
return forward(ggln, x, nothing)
end
end # module
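# Usage sketch (toy 1-D regression; shapes follow the docstrings above:
# x is [predictor_dim, batch], y is [prediction_dim, batch]):
#
#   using .GatedLinearNetworks
#   ggln = GaussianGGLN(1, 1, 2, 8, 4)  # 1-D in/out, 2 layers of width 8, 4 context functions
#   for _ in 1:200
#       x = rand(Float32, 1, 32)
#       y = sin.(3f0 .* x) .+ 0.1f0 .* randn(Float32, 1, 32)
#       train!(ggln, x, y)
#   end
#   μ, σ2 = predict(ggln, reshape(Float32.(0:0.05:1), 1, :))  # predictive mean and variance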
|
proofpile-julia0005-42465 | {
"provenance": "014.jsonl.gz:242466"
} | using Plots
# plotly()
@testset "plot with no errors" begin
res0 = [get_interval(
[3., 2., 2.1],
i,
f_3p_1im_dep,
:LIN_EXTRAPOL;
local_alg = :LN_NELDERMEAD,
theta_bounds = [(-12., 12.), (-12., 12.), (-12., 12.)],
loss_tol = 1e-3,
loss_crit = 9.,
silent = true
) for i in 1:3]
update_profile_points!(res0[1]; max_recursions=1)
a_grid_1 = LikelihoodProfiler.get_grid(res0[1])
@test length(a_grid_1[2]) > 0
p = plot(res0[1])
@test p isa Plots.Plot
end
|
proofpile-julia0005-42466 | {
"provenance": "014.jsonl.gz:242467"
} | module Contractions
# export
export fix_contractions
"""
fix_contractions: fix contractions (e.g. "can't" -> "cannot") in a given text
params:
------
- `text::AbstractString`: the given text
usage:
------
>>> fix_contractions(mytext)
"""
function fix_contractions(text::AbstractString)
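    # Longer contractions (e.g. "can't've") must be expanded before the shorter
    # forms they contain (e.g. "can't"); otherwise the shorter replacement
    # fires first and the longer pattern can never match.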
text = replace.(text,"ain't"=>"am not")
text = replace.(text,"aren't"=>"are not")
text = replace.(text,"can't"=>"cannot")
text = replace.(text,"can't've"=>"cannot have")
text = replace.(text,"'cause"=>"because")
text = replace.(text,"could've"=>"could have")
text = replace.(text,"couldn't"=>"could not")
text = replace.(text,"couldn't've"=>"could not have")
text = replace.(text,"didn't"=>"did not")
text = replace.(text,"doesn't"=>"does not")
text = replace.(text,"don't"=>"do not")
text = replace.(text,"hadn't"=>"had not")
text = replace.(text,"hadn't've"=>"had not have")
text = replace.(text,"hasn't"=>"has not")
text = replace.(text,"haven't"=>"have not")
text = replace.(text,"he'd"=>"he would")
text = replace.(text,"he'd've"=>"he would have")
text = replace.(text,"he'll"=>"he will")
text = replace.(text,"he'll've"=>"he will have")
text = replace.(text,"he's"=>"he is")
text = replace.(text,"how'd"=>"how did")
text = replace.(text,"how'd'y"=>"how do you")
text = replace.(text,"how'll"=>"how will")
text = replace.(text,"how's"=>"how is")
text = replace.(text,"I'd"=>"I would")
text = replace.(text,"I'd've"=>"I would have")
text = replace.(text,"I'll"=>"I will")
text = replace.(text,"I'll've"=>"I will have")
text = replace.(text,"I'm"=>"I am")
text = replace.(text,"I've"=>"I have")
text = replace.(text,"isn't"=>"is not")
text = replace.(text,"it'd"=>"it had")
text = replace.(text,"it'd've"=>"it would have")
text = replace.(text,"it'll"=>"it will")
text = replace.(text,"it'll've"=>"it will have")
text = replace.(text,"it's"=>"it is")
text = replace.(text,"let's"=>"let us")
text = replace.(text,"ma'am"=>"madam")
text = replace.(text,"mayn't"=>"may not")
text = replace.(text,"might've"=>"might have")
text = replace.(text,"mightn't"=>"might not")
text = replace.(text,"mightn't've"=>"might not have")
text = replace.(text,"must've"=>"must have")
text = replace.(text,"mustn't"=>"must not")
text = replace.(text,"mustn't've"=>"must not have")
text = replace.(text,"needn't"=>"need not")
text = replace.(text,"needn't've"=>"need not have")
text = replace.(text,"o'clock"=>"of the clock")
text = replace.(text,"oughtn't"=>"ought not")
text = replace.(text,"oughtn't've"=>"ought not have")
text = replace.(text,"shan't"=>"shall not")
text = replace.(text,"sha'n't"=>"shall not")
text = replace.(text,"shan't've"=>"shall not have")
text = replace.(text,"she'd"=>"she would")
text = replace.(text,"she'd've"=>"she would have")
text = replace.(text,"she'll"=>"she will")
text = replace.(text,"she'll've"=>"she will have")
text = replace.(text,"she's"=>"she is")
text = replace.(text,"should've"=>"should have")
text = replace.(text,"shouldn't"=>"should not")
text = replace.(text,"shouldn't've"=>"should not have")
text = replace.(text,"so've"=>"so have")
text = replace.(text,"so's"=>"so is")
text = replace.(text,"that'd"=>"that would")
text = replace.(text,"that'd've"=>"that would have")
text = replace.(text,"that's"=>"that is")
text = replace.(text,"there'd"=>"there had")
text = replace.(text,"there'd've"=>"there would have")
text = replace.(text,"there's"=>"there is")
text = replace.(text,"they'd"=>"they would")
text = replace.(text,"they'd've"=>"they would have")
text = replace.(text,"they'll"=>"they will")
text = replace.(text,"they'll've"=>"they will have")
text = replace.(text,"they're"=>"they are")
text = replace.(text,"they've"=>"they have")
text = replace.(text,"to've"=>"to have")
text = replace.(text,"wasn't"=>"was not")
text = replace.(text,"we'd"=>"we had")
text = replace.(text,"we'd've"=>"we would have")
text = replace.(text,"we'll"=>"we will")
text = replace.(text,"we'll've"=>"we will have")
text = replace.(text,"we're"=>"we are")
text = replace.(text,"we've"=>"we have")
text = replace.(text,"weren't"=>"were not")
text = replace.(text,"what'll"=>"what will")
text = replace.(text,"what'll've"=>"what will have")
text = replace.(text,"what're"=>"what are")
text = replace.(text,"what's"=>"what is")
text = replace.(text,"what've"=>"what have")
text = replace.(text,"when's"=>"when is")
text = replace.(text,"when've"=>"when have")
text = replace.(text,"where'd"=>"where did")
text = replace.(text,"where's"=>"where is")
text = replace.(text,"where've"=>"where have")
text = replace.(text,"who'll"=>"who will")
text = replace.(text,"who'll've"=>"who will have")
text = replace.(text,"who's"=>"who is")
text = replace.(text,"who've"=>"who have")
text = replace.(text,"why's"=>"why is")
text = replace.(text,"why've"=>"why have")
text = replace.(text,"will've"=>"will have")
text = replace.(text,"won't"=>"will not")
text = replace.(text,"won't've"=>"will not have")
text = replace.(text,"would've"=>"would have")
text = replace.(text,"wouldn't"=>"would not")
text = replace.(text,"wouldn't've"=>"would not have")
text = replace.(text,"y'all"=>"you all")
text = replace.(text,"y'alls"=>"you alls")
text = replace.(text,"y'all'd"=>"you all would")
text = replace.(text,"y'all'd've"=>"you all would have")
text = replace.(text,"y'all're"=>"you all are")
text = replace.(text,"y'all've"=>"you all have")
text = replace.(text,"you'd"=>"you had")
text = replace.(text,"you'd've"=>"you would have")
text = replace.(text,"you'll"=>"you you will")
text = replace.(text,"you'll've"=>"you you will have")
text = replace.(text,"you're"=>"you are")
text = replace.(text,"you've"=>"you have")
return text
end
end
|
proofpile-julia0005-42467 | {
"provenance": "014.jsonl.gz:242468"
} | using TensorToolbox
include("LTR.jl")
include("methods/NTD.jl")
include("methods/lraSNTD.jl")
include("methods/LD/LD.jl")
function tucker_to_cprank(tucker_rank)
if all(isequal(first(tucker_rank)),tucker_rank)
cp = tucker_rank[1]
else
error("rank $tucker_rank cannot convert to CP rank")
end
return cp
end
function calc(X, method, reqrank)
if method == "LT1R"
@assert tucker_to_cprank(reqrank) == 1
return LT1R(X)
elseif method == "LTR"
return LTR(X, reqrank)
elseif method == "NTD_KL"
_, _, Xr = NTD(X, reqrank, cost="KL", init_method=NTD_init, verbose=false)
return Xr
elseif method == "NTD_LS"
_, _, Xr = NTD(X, reqrank, cost="LS", init_method=NTD_init, verbose=false)
return Xr
elseif method == "lraSNTD"
return lraSNTD(X, reqrank)
else
error("calc method error")
end
end
|
proofpile-julia0005-42468 | {
"provenance": "014.jsonl.gz:242469"
} | #!/usr/local/bin/julia
using MultivariatePolynomials
using JuMP
using PolyJuMP
using SumOfSquares
using ArgParse
function try_import(name::Symbol)
try
@eval import $name
return true
catch e
return false
end
end
scs = try_import(:SCS)
csdp = try_import(:CSDP)
isscs(solver) = contains(string(typeof(solver)),"SCSSolver")
# Semidefinite solvers
sdp_solvers = Any[]
# Need 54000 iterations for sosdemo3 to pass on Linux 64 bits
# With 55000, sosdemo3 passes for every platform except Windows 64 bits on AppVeyor
scs && push!(sdp_solvers, SCS.SCSSolver(eps=1e-6, max_iters=60000, verbose=0))
csdp && push!(sdp_solvers, CSDP.CSDPSolver(printlevel=1, maxiter=60000, axtol=1.0e-8, atytol=1.0e-8))
function main(args)
global sdp_solvers
s = ArgParseSettings("Example 4 for argparse.jl: " *
"more tweaking of the arg fields: " *
"dest_name, metvar, range_tested, " *
"alternative actions.")
@add_arg_table s begin
"--opt1"
action = :append_const # appends 'constant' to 'dest_name'
arg_type = ByteString # the only utility of this is restricting the dest array type
constant = "O1"
dest_name = "O_stack" # this changes the destination
help = "append O1"
end
s.epilog = """
freeSir.jl
"""
parsed_args = parse_args(args, s)
println("Parsed args:")
for (key,val) in parsed_args
println(" $key => $(repr(val))")
end
println("calling the solvers")
for solver in sdp_solvers
@polyvar x[1:2]
μ = 0.2
β = 0.5
γ = 0.6
s = 1.
i = 0.
R0 = β / ( μ + γ )
# Constructing the vector field dx/dt = f
f = [μ - β * (x[1] + s) * (x[2] + i) - μ * (x[1] + s),
β * (x[1] + s) * (x[2] + i) - (μ + γ) * (x[2] + i)]
#f = [0.06 - 0.01 * (x[1] + 1.) * x[2] - 0.06 * (x[1] + 1.),
# 0.01 * (x[1] + 1.) * x[2] - (0.06 + 0.5) * x[2]]
#println(f)
m = Model(solver = solver)
# The Lyapunov function V(x):
Z = x.^2
@polyvariable m V Z
@polyconstraint m V >= 1e-16 * sum(x.^2)
# dV/dx*f <= 0
P = dot(differentiate(V, x), f)
@polyconstraint m P <= 0
@polyconstraint m x[1] * ( x[1] + 1) <= 0
@polyconstraint m x[2] * ( x[2] - 1) <= 0
println(x)
println("este es el modelo final")
print(m)
println("valor de la funcion objetivo", getobjectivevalue(m))
status = solve(m);
flyapunov = getvalue(V)
println("Lyapunov function using $(typeof(solver))")
println(flyapunov)
println("Derivada Orbital")
println(dot(differentiate(flyapunov, x), f))
println(status)
println("Basic Reproductive Number")
println(R0)
#status --> :Optimal
#removemonomials(getvalue(V), Z) --> zero(Polynomial{true, Float64})
end
end
main(ARGS)
|
proofpile-julia0005-42469 | {
"provenance": "014.jsonl.gz:242470"
} | using DrWatson
@quickactivate("HyperspectraWithNeXL")
# Load the necessary libraries
using HyperspectraWithNeXL
using NeXLSpectrum
using Unitful
using FileIO, ImageIO
# Load the HyperSpectrum from disk
lt = 0.72*4.0*18.0*3600.0/(1024*1024) # 18.0 hours on 4 detectors
hs = NeXLSpectrum.compress(HyperSpectrum(
LinearEnergyScale(0.0,10.0),
Dict{Symbol,Any}(
:TakeOffAngle => deg2rad(35.0),
:ProbeCurrent => 1.0,
:LiveTime => lt,
:BeamEnergy => 20.0e3,
:Name => "Mn Nodule"
),
readrplraw(joinpath(datadep"MnNodule","map[15]")),
fov = [ 4.096u"mm", 4.096u"mm"], offset= [ 0.0u"mm", 0.0u"mm" ]
))
outpath = joinpath(papersdir(),"Figures","Figure 5")
mkpath(outpath)
cp(joinpath(datadep"MnNodule","Image[0][[1]].png"), joinpath(outpath,"Mn_nodule_BSED.png"), force=true)
# Independent normalization (to brightest pixel)
outpath = joinpath(papersdir(),"Figures","Figure 6")
mkpath(outpath)
FileIO.save(File{format"PNG"}(joinpath(outpath,"I[C K-L2].png")), hs[n"Mn K-L3"])
FileIO.save(File{format"PNG"}(joinpath(outpath,"I[Mn K-L3].png")), hs[n"Mn K-L3"])
FileIO.save(File{format"PNG"}(joinpath(outpath,"I[Fe K-L3].png")), hs[n"Fe K-L3"])
FileIO.save(File{format"PNG"}(joinpath(outpath,"I[O K-L3].png")), hs[n"O K-L3"])
# Normalized as a set (normalize to sum of intensities)
outpath=plotsdir()
mkpath(outpath)
cxrs = [n"Mn K-L3", n"O K-L3", n"Fe K-L3" ]
imgs = hs[ cxrs ]
for (cxr, img) in zip(cxrs, imgs)
FileIO.save(File{format"PNG"}(joinpath(outpath,"I_rel[$cxr].png")), img)
end
# RGB
outpath = joinpath(papersdir(),"Figures","Figure 7")
mkpath(outpath)
img = colorize(hs, cxrs, :All)
FileIO.save(File{format"PNG"}(joinpath(outpath,"colorized[Mn,O,Fe,All].png")), img)
img = colorize(hs, cxrs, :Each)
FileIO.save(File{format"PNG"}(joinpath(outpath,"colorized[Mn,O,Fe,Each].png")), img) |
proofpile-julia0005-42470 | {
"provenance": "014.jsonl.gz:242471"
} | @testset "Levenberg-Marquardt: NIST Dataset (BoxBOD)" begin
print_head("Levenberg-Marquardt: NIST Dataset (BoxBOD)")
obj = optimizer(nd=2, ny=6, method="lm")
fun = BoxBOD()
sol = Vector{Float64}(undef, 2)
parμ = [10.0, 5.5]
βvec = zeros(Float64, 6)
βmat = zeros(Float64, 6, 6)
print_body("\033[1m\033[34mLikelihood Precision Matrix: Identity\033[0m")
@simd for i in eachindex(βvec)
@inbounds βvec[i] = 1.0
end
@simd for i in eachindex(βvec)
@inbounds βmat[i,i] = 1.0
end
ans = [213.809409, 0.54723748]
print_body(string("Precision Const.: ", optimize!(sol, fun, parμ, 1.0, obj)))
@inbounds for i in eachindex(ans)
@test sol[i] ≈ ans[i] rtol=1e-7
end
print_body(string("Precision Vector: ", optimize!(sol, fun, parμ, βvec, obj)))
@inbounds for i in eachindex(ans)
@test sol[i] ≈ ans[i] rtol=1e-7
end
print_body(string("Precision Matrix: ", optimize!(sol, fun, parμ, βmat, obj)))
@inbounds for i in eachindex(ans)
@test sol[i] ≈ ans[i] rtol=1e-7
end
print_body("\033[1m\033[34mLikelihood Precision Matrix: Custom\033[0m")
@simd for i in eachindex(βvec)
@inbounds βvec[i] = 1.0 + 0.1 * (i-1)
end
@simd for i in eachindex(βvec)
@inbounds βmat[i,i] = 1.0 + 0.1 * (i-1)
end
@inbounds ans[1], ans[2] = 216.132114, 0.52157598
print_body(string("Precision Vector: ", optimize!(sol, fun, parμ, βvec, obj)))
@inbounds for i in eachindex(ans)
@test sol[i] ≈ ans[i] rtol=1e-7
end
print_body(string("Precision Matrix: ", optimize!(sol, fun, parμ, βmat, obj)))
@inbounds for i in eachindex(ans)
@test sol[i] ≈ ans[i] rtol=1e-7
end
end
|
proofpile-julia0005-42471 | {
"provenance": "014.jsonl.gz:242472"
} | using DataFrames # readtable function
# using Requests
type Bus
nodeID::Int
root::Int
Pd::Float64
Qd::Float64
Vmax::Float64
Vmin::Float64
B::Float64
R::Float64
X::Float64
children::Vector{Int}
ancestor::Vector{Int}
genids::Int
function Bus(nodeID, root, Pd, Qd, B, R, X, Vmax, Vmin)
b = new(nodeID, root, Pd, Qd)
b.Vmax = Vmax
b.Vmin = Vmin
b.R = R
b.X = X
b.B = B
b.children = Int[]
b.ancestor = Int[]
# b.genids = 0
return b
end
end
#################################################################
type Generator
genID::Int
busidx::Int
Pgmax::Float64
Pgmin::Float64
Qgmax::Float64
Qgmin::Float64
cost::Float64
function Generator( busidx, Pgmax, Pgmin, Qgmax, Qgmin, cost)
g = new(busidx)
g.busidx = busidx
g.cost = cost
g.Pgmax = Pgmax
g.Pgmin = Pgmin
g.Qgmax = Qgmax
g.Qgmin = Qgmin
return g
end
end
##################################################################
type Line
arcID::Int
tail::Int # the "to" node
head::Int # the "from" node
r::Float64 # the resistance value
x::Float64 # the reactance value
u::Float64 # the capacity of the line
function Line(arcID, tail, head, r, x, u)
line = new(arcID, tail, head, r, x)
line.u = u
return line
end
end
#########################################
## storage data
type Storage
ID::Int
busidx::Int
Pkmax::Float64
Ekmax::Float64
Aleph::Float64
Co::Float64
Cp::Float64
Ce::Float64
function Storage(ID, busidx, Pkmax, Ekmax, Aleph, Co, Cp, Ce)
s = new(ID)
s.ID = ID
s.busidx = busidx
s.Pkmax = Pkmax
s.Ekmax = Ekmax
s.Aleph = Aleph
s.Co = Co
s.Cp = Cp
s.Ce = Ce
return s
end
end
#########################################
## station data
type Station
ID::Int
busidx::Int
Cf::Float64
function Station(ID, busidx, Cf)
st = new(ID)
st.ID = ID
st.busidx = busidx
st.Cf = Cf
return st
end
end
######################################################################################################
function DataImport(filename_Node, filename_Generator, filename_Line, filename_Storage, filename_Station, filename_SMP)
######################################################################################################
# Bus/Node Data
# busmat = readtable(filename_Node)
busmat = readcsv(filename_Node, header=true)[1]
buses = Bus[]
busIDmap = Dict()
for i in 1:size(busmat,1)
nodeID = i
busIDmap[busmat[i,1]] = i
if i==1
root = busIDmap[busmat[i,1]]
else
root = 0
end
Pd = busmat[i,2]
Qd = busmat[i,3]
R = busmat[i,6]
X = busmat[i,7]
Vmax = busmat[i,4]
Vmin = busmat[i,5]
B = busmat[i,8]
b = Bus(nodeID, root, Pd, Qd, B, R, X, Vmax, Vmin)
push!(buses, b)
end
#######################################################################################################
## generator data
generatorlist = Int[]
generators = Generator[]
# genmat = readtable(filename_Generator)
genmat = readcsv(filename_Generator, header=true)[1]
for i in 1:size(genmat,1)
busidx = genmat[i,1]
Pgmax = genmat[i,2]
Pgmin = genmat[i,3]
Qgmax = genmat[i,4]
Qgmin = genmat[i,5]
cost = genmat[i,6]
g = Generator(busidx, Pgmax, Pgmin, Qgmax, Qgmin, cost)
push!(generators, g)
# setg(buses[busidx], i)
end
#for g in 1:length(generators)
# generators[g].cost = genmat[g,4]
#end
#################################################################
## branch data
# branchmat = readtable(filename_Line)
branchmat = readcsv(filename_Line, header=true)[1]
lines = Line[]
for i in 1:size(branchmat,1)
fbus = busIDmap[branchmat[i,2]]
tbus = busIDmap[branchmat[i,1]]
abus = busIDmap[branchmat[i,1]]
x = branchmat[i,4]
r = branchmat[i,3]
u = branchmat[i,6] # flow limit
push!(buses[tbus].children, fbus)#children
push!(buses[fbus].ancestor, abus)#ancestor
l = Line(i, tbus, fbus, r, x, u)
push!(lines,l)
end
#########################################
## storage data
# storagemat = readtable(filename_Storage)
storagemat = readcsv(filename_Storage, header=true)[1]
storages = Storage[]
for i in 1:size(storagemat,1)
storageID = i
busidx = 1
Pkmax = storagemat[i,2]
Ekmax = storagemat[i,3]
Aleph = storagemat[i,4]
Co = storagemat[i,5]
Cp = storagemat[i,6]
Ce = storagemat[i,7]
temp = Storage(storageID, busidx, Pkmax, Ekmax, Aleph, Co, Cp, Ce)
push!(storages,temp)
end
#########################################
## charging/discharing station data
# stationmat = readtable(filename_Station)
stationmat = readcsv(filename_Station, header=true)[1]
stations = Station[]
for i in 1:size(stationmat,1)
stationID = i
busidx = stationmat[i,2]
Cf = stationmat[i,3]
temp = Station(stationID, busidx, Cf)
push!(stations,temp)
end
#########################################
SMP_raw = readcsv(filename_SMP, header=true)[1]
SMP = SMP_raw[3:end]
return buses, generators,lines, storages, stations, SMP
end
|
proofpile-julia0005-42472 | {
"provenance": "014.jsonl.gz:242473"
} | # SPDX-License-Identifier: X11
# 2020-12-07
# Day 2, Part 1
const reg = r"^(\d*)-(\d*) ([a-z]): ([a-z]*)"
function main()
input = readlines()
cnt = 0
for x ∈ input
validpass(x) && (cnt += 1)
end
println(cnt)
end
function validpass(str::AbstractString)::Bool
m = match(reg, str)
    min = parse(Int, m.captures[1])
    max = parse(Int, m.captures[2])
chr = m.captures[3][1]
pass = m.captures[end]
cnt = 0
for c ∈ pass
c == chr && (cnt += 1)
end
return cnt >= min && cnt <= max
end
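# Illustrative examples of the policy check:
#   validpass("1-3 a: abcde") == true    # 'a' occurs once, within 1-3
#   validpass("1-3 b: cdefg") == false   # no 'b' at all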
main()
|
proofpile-julia0005-42473 | {
"provenance": "014.jsonl.gz:242474"
} | using Docile, Lexicon, JuliaFEM
const api_directory = "api"
"""
Searches recursively all the modules from packages. As documentation grows, it's a bit
troublesome to add all the new modules manually, so this function searches all the modules
automatically.
Parameters
----------
module_: Module
Module where we want to search modules inside
append_list: Array{Module, 1}
Array, where we append Modules as we find them
Returns
-------
None. Void function, which manipulates the append_list
"""
function search_modules!(module_::Module, append_list::Array{Module, 1})
all_names = names(module_, true)
for each in all_names
inner_module = module_.(each)
if (typeof(inner_module) == Module) && !(inner_module in append_list)
push!(append_list, inner_module)
search_modules!(inner_module, append_list)
end
end
end
append_list = Array(Module, 0)
search_modules!(JuliaFEM, append_list)
const modules = append_list
# main_folder = dirname(dirname(@__FILE__))
# this_folder = dirname(@__FILE__)
# file_ = "README.md"
# run(`cp $main_folder/$file_ $this_folder`)
cd(dirname(@__FILE__)) do
# Run the doctests *before* we start to generate *any* documentation.
# for m in modules
# failures = failed(doctest(m))
# if !isempty(failures.results)
# println("\nDoctests failed, aborting commit.\n")
# display(failures)
# exit(1) # Bail when doctests fail.
# end
# end
# Generate and save the contents of docstrings as markdown files.
index = Index()
for mod in modules
Lexicon.update!(index, save(joinpath(api_directory, "$(mod).rst"), mod))
end
save(joinpath(api_directory, "index.rst"), index)
# Add a reminder not to edit the generated files.
# open(joinpath(api_directory, "README.md"), "w") do f
# print(f, """
# Files in this directory are generated using the `build.jl` script. Make
# all changes to the originating docstrings/files rather than these ones.
# """)
# end
# save(joinpath(api_directory, "index.rst"), index; md_subheader = :category)
# info("Adding all documentation changes in $(api_directory) to this commit.")
# success(`git add $(api_directory)`) || exit(1)
end
|
proofpile-julia0005-42474 | {
"provenance": "014.jsonl.gz:242475"
} | # syntax: proto3
using ProtoBuf
import ProtoBuf.meta
# ValueType describes the semantics and measurement units of a value.
mutable struct ValueType <: ProtoType
_type::Int64 # Index into string table.
unit::Int64 # Index into string table.
ValueType(; kwargs...) = (o=new(); fillunset(o); isempty(kwargs) || ProtoBuf._protobuild(o, kwargs); o)
end #mutable struct ValueType
mutable struct Label <: ProtoType
key::Int64 # Index into string table
# At most one of the following must be present
str::Int64 # Index into string table
num::Int64
# Should only be present when num is present.
# Specifies the units of num.
# Use arbitrary string (for example, "requests") as a custom count unit.
# If no unit is specified, consumer may apply heuristic to deduce the unit.
# Consumers may also interpret units like "bytes" and "kilobytes" as memory
# units and units like "seconds" and "nanoseconds" as time units,
# and apply appropriate unit conversions to these.
num_unit::Int64 # Index into string table
Label(; kwargs...) = (o=new(); fillunset(o); isempty(kwargs) || ProtoBuf._protobuild(o, kwargs); o)
end #mutable struct Label
# Each Sample records values encountered in some program
# context. The program context is typically a stack trace, perhaps
# augmented with auxiliary information like the thread-id, some
# indicator of a higher level request being handled etc.
mutable struct Sample <: ProtoType
# The ids recorded here correspond to a Profile.location.id.
# The leaf is at location_id[0].
location_id::Base.Vector{UInt64}
# The type and unit of each value is defined by the corresponding
# entry in Profile.sample_type. All samples must have the same
# number of values, the same as the length of Profile.sample_type.
# When aggregating multiple samples into a single sample, the
    # result has a list of values that is the elementwise sum of the
# lists of the originals.
value::Base.Vector{Int64}
# label includes additional context for this sample. It can include
# things like a thread id, allocation size, etc
label::Base.Vector{Label}
Sample(; kwargs...) = (o=new(); fillunset(o); isempty(kwargs) || ProtoBuf._protobuild(o, kwargs); o)
end #mutable struct Sample
const __pack_Sample = Symbol[:location_id,:value]
meta(t::Type{Sample}) = meta(t, ProtoBuf.DEF_REQ, ProtoBuf.DEF_FNUM, ProtoBuf.DEF_VAL, true, __pack_Sample, ProtoBuf.DEF_WTYPES, ProtoBuf.DEF_ONEOFS, ProtoBuf.DEF_ONEOF_NAMES, ProtoBuf.DEF_FIELD_TYPES)
mutable struct Mapping <: ProtoType
# Unique nonzero id for the mapping.
id::UInt64
# Address at which the binary (or DLL) is loaded into memory.
memory_start::UInt64
# The limit of the address range occupied by this mapping.
memory_limit::UInt64
# Offset in the binary that corresponds to the first mapped address.
file_offset::UInt64
# The object this entry is loaded from. This can be a filename on
# disk for the main binary and shared libraries, or virtual
# abstractions like "[vdso]".
filename::Int64 # Index into string table
# A string that uniquely identifies a particular program version
# with high probability. E.g., for binaries generated by GNU tools,
# it could be the contents of the .note.gnu.build-id field.
build_id::Int64 # Index into string table
# The following fields indicate the resolution of symbolic info.
has_functions::Bool
has_filenames::Bool
has_line_numbers::Bool
has_inline_frames::Bool
Mapping(; kwargs...) = (o=new(); fillunset(o); isempty(kwargs) || ProtoBuf._protobuild(o, kwargs); o)
end #mutable struct Mapping
mutable struct Line <: ProtoType
# The id of the corresponding profile.Function for this line.
function_id::UInt64
# Line number in source code.
line::Int64
Line(; kwargs...) = (o=new(); fillunset(o); isempty(kwargs) || ProtoBuf._protobuild(o, kwargs); o)
end #mutable struct Line
# Describes function and line table debug information.
mutable struct Location <: ProtoType
# Unique nonzero id for the location. A profile could use
# instruction addresses or any integer sequence as ids.
id::UInt64
# The id of the corresponding profile.Mapping for this location.
# It can be unset if the mapping is unknown or not applicable for
# this profile type.
mapping_id::UInt64
# The instruction address for this location, if available. It
# should be within [Mapping.memory_start...Mapping.memory_limit]
# for the corresponding mapping. A non-leaf address may be in the
# middle of a call instruction. It is up to display tools to find
# the beginning of the instruction if necessary.
address::UInt64
# Multiple line indicates this location has inlined functions,
# where the last entry represents the caller into which the
# preceding entries were inlined.
#
# E.g., if memcpy() is inlined into printf:
# line[0].function_name == "memcpy"
# line[1].function_name == "printf"
line::Base.Vector{Line}
# Provides an indication that multiple symbols map to this location's
# address, for example due to identical code folding by the linker. In that
# case the line information above represents one of the multiple
# symbols. This field must be recomputed when the symbolization state of the
# profile changes.
is_folded::Bool
Location(; kwargs...) = (o=new(); fillunset(o); isempty(kwargs) || ProtoBuf._protobuild(o, kwargs); o)
end #mutable struct Location
mutable struct Function <: ProtoType
# Unique nonzero id for the function.
id::UInt64
# Name of the function, in human-readable form if available.
name::Int64 # Index into string table
# Name of the function, as identified by the system.
# For instance, it can be a C++ mangled name.
system_name::Int64 # Index into string table
# Source file containing the function.
filename::Int64 # Index into string table
# Line number in source file.
start_line::Int64
Function(; kwargs...) = (o=new(); fillunset(o); isempty(kwargs) || ProtoBuf._protobuild(o, kwargs); o)
end #mutable struct Function
mutable struct Profile <: ProtoType
# A description of the samples associated with each Sample.value.
# For a cpu profile this might be:
# [["cpu","nanoseconds"]] or [["wall","seconds"]] or [["syscall","count"]]
# For a heap profile, this might be:
# [["allocations","count"], ["space","bytes"]],
# If one of the values represents the number of events represented
# by the sample, by convention it should be at index 0 and use
# sample_type.unit == "count".
sample_type::Base.Vector{ValueType}
# The set of samples recorded in this profile.
sample::Base.Vector{Sample}
# Mapping from address ranges to the image/binary/library mapped
# into that address range. mapping[0] will be the main binary.
mapping::Base.Vector{Mapping}
# Useful program location
location::Base.Vector{Location}
# Functions referenced by locations
_function::Base.Vector{Function}
# A common table for strings referenced by various messages.
# string_table[0] must always be "".
string_table::Base.Vector{AbstractString}
# frames with Function.function_name fully matching the following
# regexp will be dropped from the samples, along with their successors.
drop_frames::Int64 # Index into string table.
# frames with Function.function_name fully matching the following
# regexp will be kept, even if it matches drop_functions.
keep_frames::Int64 # Index into string table.
# The following fields are informational, do not affect
# interpretation of results.
# Time of collection (UTC) represented as nanoseconds past the epoch.
time_nanos::Int64
# Duration of the profile, if a duration makes sense.
duration_nanos::Int64
    # The kind of events between sampled occurrences.
# e.g [ "cpu","cycles" ] or [ "heap","bytes" ]
period_type::ValueType
# The number of events between sampled occurrences.
period::Int64
# Freeform text associated to the profile.
comment::Base.Vector{Int64} # Indices into string table.
# Index into the string table of the type of the preferred sample
# value. If unset, clients should default to the last sample value.
default_sample_type::Int64
Profile(; kwargs...) = (o=new(); fillunset(o); isempty(kwargs) || ProtoBuf._protobuild(o, kwargs); o)
end #mutable struct Profile
const __pack_Profile = Symbol[:comment]
meta(t::Type{Profile}) = meta(t, ProtoBuf.DEF_REQ, ProtoBuf.DEF_FNUM, ProtoBuf.DEF_VAL, true, __pack_Profile, ProtoBuf.DEF_WTYPES, ProtoBuf.DEF_ONEOFS, ProtoBuf.DEF_ONEOF_NAMES, ProtoBuf.DEF_FIELD_TYPES)
export Profile, ValueType, Sample, Label, Mapping, Location, Line, Function
|
proofpile-julia0005-42475 | {
"provenance": "014.jsonl.gz:242476"
} | using JLD2, DataFrames, Serialization, LinearAlgebra, Distributions
import GEAS:breeding, sim_QTL, summarize, create_storage
function gpar(nqtl)
Dict(
:nSire=> 100,
:nDam => 200, # male:female = 1:2
:nSib => 40,
:nC7e => 30, # number of sib to be challenged
:nG8n => 11, # number of generations
:nQTL => [nqtl, nqtl],
:h² => [.5, .5],
:p8e => .5, # percentage of death after challenge
        :e19e => 1., # edit_successful_rate
:w4t => 1, # weight on the binary trait EBV
:nK3n => 10, # number of known QTL on binary trait
:dd => 0.01, # A small value to be added to diagonals
:edit => false,
:fix => false
)
end
function t_2021_09_12(nrpt = 10)
BLAS.set_num_threads(12)
nqtl = [100, 500]
isdir("dat/tmp") || mkpath("dat/tmp")
@load "dat/run/base.jld2" base
qdist = [Normal(), Laplace()]
rst = DataFrame()
ng8n = 11
for nq in nqtl
@info "With n_QTL = $nq"
dpr = gpar(nq) # parameter dictionary
for qd in qdist
@info "QTL distribution $(string(qd))"
for rpt in 1:nrpt
@info "Repeat $rpt of $nrpt"
@info "Preparing common base + generation 1"
dpr[:fix], dpr[:edit] = false, false
par = (; dpr...)
qtl = sim_QTL(base, par.nQTL..., d=qd)
snp, ped = create_storage(base, par, qtl)
serialize("dat/tmp/snp.ser", snp)
serialize("dat/tmp/ped.ser", ped)
@info "Selection with SNP-BLUP"
par = (; dpr...)
breeding(ped, snp, base, qtl, par)
df = summarize(ped.prd, snp.prd, qtl[2])
df.repeat = repeat([rpt], ng8n)
df.dist = repeat([string(qd)[1:3]], ng8n)
df.nq = repeat([nq], ng8n)
df.method = repeat(["SBLP"], ng8n)
append!(rst, df)
@info "Selection with emphasis on top $(par.nK3n) QTL"
dpr[:fix], dpr[:edit] = true, false
par = (; dpr...)
snp = deserialize("dat/tmp/snp.ser")
ped = deserialize("dat/tmp/ped.ser")
breeding(ped, snp, base, qtl, par)
df = summarize(ped.prd, snp.prd, qtl[2])
df.repeat = repeat([rpt], ng8n)
df.dist = repeat([string(qd)[1:3]], ng8n)
df.nq = repeat([nq], ng8n)
df.method = repeat(["Fix"], ng8n)
append!(rst, df)
dpr[:fix], dpr[:edit] = false, true
par = (; dpr...)
snp = deserialize("dat/tmp/snp.ser")
ped = deserialize("dat/tmp/ped.ser")
breeding(ped, snp, base, qtl, par)
df = summarize(ped.prd, snp.prd, qtl[2])
df.repeat = repeat([rpt], ng8n)
df.dist = repeat([string(qd)[1:3]], ng8n)
df.nq = repeat([nq], ng8n)
df.method = repeat(["Edit"], ng8n)
append!(rst, df)
serialize("dat/rst/3w-cmp.ser", rst)
end
end
end
end
function shortcut_2021_09_12()
BLAS.set_num_threads(12)
dpr = gpar(100)
par = (; dpr...)
@load "dat/run/base.jld2" base
qtl = sim_QTL(base, par.nQTL...)
snp, ped = create_storage(base, par, qtl)
breeding(ped, snp, base, qtl, par)
serialize("dat/tmp/sample.ser", Dict(:snp=>snp, :ped=>ped, :qtl=>qtl))
end
|
proofpile-julia0005-42476 | {
"provenance": "014.jsonl.gz:242477"
} | """
T Macrina
160310
Change to directory with the following files:
* segmentation.h5
* classification.csv
* class_description.csv
Run
`julia semantic.jl`
"""
using HDF5
"""
`CREATE_SEMANTIC_MASK` - create indexed image for semantic class of every voxel
Args:
* indexed_matrix: matrix indexed with segment IDs
* segment_class: dictionary, keys are segment IDs, values are the class
* uncertain_id: class index for a segment ID that was not classified
Returns:
* semantic_mask: matrix indexed with class IDs
"""
function create_semantic_mask(indexed_matrix, segment_class, uncertain_id=0)
semantic_mask = ones(UInt8, size(indexed_matrix)...)
for i in 1:length(indexed_matrix)
k = indexed_matrix[i]
if haskey(segment_class, k)
semantic_mask[i] = UInt8(segment_class[k])
else
semantic_mask[i] = uncertain_id
end
end
return semantic_mask
end
"""
`CHECK_SEGMENT_IDS` - Check that all segments have been classified
"""
function check_segment_ids(indexed_matrix, segment_class)
segments_marked = Set(unique(indexed_matrix))
segments_classified = Set(keys(segment_class))
println("Marked but not classified: ", length(setdiff(segments_marked,
segments_classified)))
println("Classified but not marked: ", length(setdiff(segments_classified,
segments_marked)))
    return assert(segments_marked == segments_classified)
end
"""
`WRITE_SEMANTIC_MASK` - create indexed image for semantic class of every voxel
Args:
* dir: path to the folder containing the following files
** segmentation.h5: the segment indexed volume with attribute "main"
** classification.csv: 2 column table - segment id, class id
** class_description.csv: 2 column table - class id, class description
Returns:
Writes out H5 file, semantic_mask - class id indexed image
* "main": matrix indexed with class ids &
* "class_id": table of class ids
* "class_description": table of class descriptions (match class ids)
"""
function write_semantic_mask(dir)
segmentation_fn = joinpath(dir, "segmentation.h5")
classification_fn = joinpath(dir, "classification.csv")
class_description_fn = joinpath(dir, "class_description.csv")
semantic_mask_fn = joinpath(dir, "semantic_mask.h5")
indexed_matrix = h5read(segmentation_fn, "main")
segment_class = convert_table_to_dict(readdlm(classification_fn, Int))
semantic_mask = create_semantic_mask(indexed_matrix, segment_class)
classes = readdlm(class_description_fn)
class_id = map(UInt8, classes[:,1])
class_description = map(String, classes[:,2])
f = h5open(semantic_mask_fn, "w")
f["main"] = semantic_mask
f["class_id"] = class_id
f["class_description"] = class_description
close(f)
end
function convert_table_to_dict(table)
d = Dict()
for i in 1:size(table,1)
d[table[i,1]] = table[i,2]
end
return d
end
if !isinteractive()
write_semantic_mask(pwd())
end |
proofpile-julia0005-42477 | {
"provenance": "014.jsonl.gz:242478"
} | using DataFrames
using CSV
using FileIO
using Statistics
include("experiments/experimentalutils.jl")
resultsFolder = mainfolder * "experiments/twostagesvae/"
files = readdir(resultsFolder)
results = []
for f in files
if isfile(resultsFolder * f)
push!(results, CSV.read(resultsFolder * f))
end
end
results = vcat(results...)
aggres = []
for d in unique(results[:dataset])
ddf = results[results[:dataset] .== d, :]
mean_auc_pxv = mean(ddf[:auc_pxv])
mean_auc_pz = mean(ddf[:auc_pz])
push!(aggres, DataFrame(dataset = d, auc_pxv = mean_auc_pxv, auc_pz = mean_auc_pz))
end
aggres = vcat(aggres...)
|
proofpile-julia0005-42478 | {
"provenance": "014.jsonl.gz:242479"
} | module DataFittingBasics
import DataFitting: AbstractDomain, Domain_1D, Domain_2D,
Parameter, AbstractComponent, AbstractComponentData,
cdata, evaluate!
include("OffsetSlope.jl")
include("Polynomial.jl")
include("Gaussian.jl")
include("Lorentzian.jl")
end
|
proofpile-julia0005-42479 | {
"provenance": "014.jsonl.gz:242480"
} | #=
Visual illustration of the law of large numbers.
@author : Spencer Lyon <[email protected]>
Victoria Gregory <[email protected]>
References
----------
Based off the original python file illustrates_lln.py
=#
using Plots
pyplot()
using Distributions
using LaTeXStrings
n = 100
srand(42) # reproducible results
# == Arbitrary collection of distributions == #
distributions = Dict("student's t with 10 degrees of freedom" => TDist(10),
"beta(2, 2)" => Beta(2.0, 2.0),
"lognormal LN(0, 1/2)" => LogNormal(0.5),
"gamma(5, 1/2)" => Gamma(5.0, 2.0),
"poisson(4)" => Poisson(4),
"exponential with lambda = 1" => Exponential(1))
num_plots = 3
dist_data = zeros(num_plots, n)
sample_means = []
dist_means = []
titles = []
for i = 1:num_plots
dist_names = collect(keys(distributions))
# == Choose a randomly selected distribution == #
name = dist_names[rand(1:length(dist_names))]
dist = pop!(distributions, name)
# == Generate n draws from the distribution == #
data = rand(dist, n)
# == Compute sample mean at each n == #
sample_mean = Array(Float64, n)
for j=1:n
sample_mean[j] = mean(data[1:j])
end
m = mean(dist)
dist_data[i, :] = data'
push!(sample_means, sample_mean)
push!(dist_means, m*ones(n))
push!(titles, name)
end
# == Plot == #
N = repmat(reshape(repmat(linspace(1, n, n), 1, num_plots)', 1, n*num_plots), 2, 1)
heights = [zeros(1,n*num_plots); reshape(dist_data, 1, n*num_plots)]
plot(N, heights, layout=(3, 1), label="", color=:grey, alpha=0.5)
plot!(1:n, dist_data', layout=(3, 1), color=:grey, markershape=:circle,
alpha=0.5, label="", linewidth=0)
plot!(1:n, sample_means, linewidth=3, alpha=0.6, color=:green, legend=:topleft,
layout=(3, 1), label=[LaTeXString("\$\\bar{X}_n\$") "" ""])
plot!(1:n, dist_means, color=:black, linewidth=1.5, layout=(3, 1),
linestyle=:dash, grid=false, label=[LaTeXString("\$\\mu\$") "" ""])
plot!(title=titles')
|
proofpile-julia0005-42480 | {
"provenance": "014.jsonl.gz:242481"
} | mutable struct DynamicBuffer{O,A} <: AbstractBuffer
observations::Vector{O}
actions::Vector{A}
rewards::Vector{Float64}
terminals::Vector{Bool}
episodes::Int
DynamicBuffer{O,A}() where {O,A} = new(Vector{O}(), A[], Float64[], Bool[], 0)
end
DynamicBinaryBuffer{A} = DynamicBuffer{Vector{Bool},A}
function add!(buffer::DynamicBuffer{O,A}, observation::O) where {O,A}
(length(buffer.observations) != length(buffer.actions)) && !buffer.terminals[end] && error(
"Incomplete transition"
)
push!(buffer.observations, observation)
push!(buffer.terminals, false)
nothing
end
function add!(buffer::DynamicBuffer{O,A}, action::A, reward::Float64, next_obs::O, terminal::Bool) where {O,A}
length(buffer.observations) == length(buffer.actions) && error(
"Incomplete transition"
)
push!(buffer.actions, action)
push!(buffer.rewards, reward)
push!(buffer.observations, next_obs)
push!(buffer.terminals, terminal)
terminal && (buffer.episodes += 1)
nothing
end
function reset!(buffer::DynamicBuffer)
empty!(buffer.observations)
empty!(buffer.actions)
empty!(buffer.rewards)
empty!(buffer.terminals)
buffer.episodes = 0
nothing
end
function Base.iterate(buffer::DynamicBuffer, state=1)
state > length(buffer.actions) && return nothing
return ((buffer.observations[state],
buffer.actions[state],
buffer.rewards[state],
buffer.observations[state + 1],
buffer.terminals[state + 1]),
state + 1)
end
function Base.iterate(rbuffer::Base.Iterators.Reverse{DynamicBuffer{O,A}}, state=length(rbuffer.itr.actions)) where {O,A}
state < 1 && return nothing
return ((rbuffer.itr.observations[state],
rbuffer.itr.actions[state],
rbuffer.itr.rewards[state],
rbuffer.itr.observations[state + 1],
rbuffer.itr.terminals[state + 1]),
state - 1)
end
Base.length(buffer::DynamicBuffer) = length(buffer.actions)
Base.eltype(buffer::DynamicBuffer{O,A}) where {O,A} = Tuple{O,A,Float64,O,Bool} |
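# A minimal usage sketch (Int observations/actions chosen only for brevity;
# the add! protocol above expects an observation first, then repeated
# action/reward/next-observation/terminal transitions):
#
#   buf = DynamicBuffer{Int,Int}()
#   add!(buf, 0)                     # initial observation
#   add!(buf, 1, 1.0, 2, false)      # (action, reward, next_obs, terminal)
#   add!(buf, 2, 0.0, 3, true)       # terminal = true closes the episode
#   for (s, a, r, s′, done) in buf   # iterate stored transitions in order
#       # ...
#   end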
proofpile-julia0005-42481 | {
"provenance": "014.jsonl.gz:242482"
} |
# # This function fits Kernel Density and evaluates density at v (Needs KernelDensity.jl)
# function useKDE2fit(x::AbstractArray{Float64,3},v::AbstractMatrix)
# q,w,e=size(x)
# y = similar(x,q,w)
# for a=1:q, s=1:w
# z = kde( x[a,s,:] )
# y[a,s] = pdf(z, v[a,s])
# end
# return y
# end
# This function fits Normal pdf and evaluates density at v (Needs Distributions.jl)
function useN2fit(x::AbstractArray{Float64,3}, v::AbstractMatrix)
q, w, e = size(x)
y = similar(x, q, w)
for a in 1:q, s in 1:w
z = fit(Normal, x[a,s,:])
y[a,s] = pdf.(z, v[a,s])
end
return y
end
# fitting the multivariate version
function useMvN2fit(x::AbstractArray{Float64,3}, v::AbstractMatrix)
# w is no of variables
# q is no of forecast horizon
# e are number of draws
q, w, e = size(x)
y = similar(x, q)
for a = 1:q
z = fit(MvNormal, x[a,:,:])
y[a] = pdf(z, v[a,:])
end
return y
end
#############################################
## fitting PIT
"
PSPIT Calculate Probability Integral Transform time series.
[Z, ZHIST] = PSPIT(FORECAST, OBS) obtains the PIT
z-series for a given vector of observations in OBS, with associated
empirical probability forecasts given in the matrix FORECASTS. Each
row of FORECASTS contains an empirical distribution for each element
of OBS. [...] = PSPIT(..., ZBINS) specifies the number of histogram bins
on the interval [0,1] (default is 10 bins). [...] =
PSPIT(..., ZBINS, ZSIGNIF) specifies a significance probability for the
null hypothesis test of the z-series bin counts (default 0.95 - 95%
significance level).
Output parameter Z is the z-series, and ZHIST is a structure
with the following elements:
'h.counts' - Count of the number of z-series elements falling within
each histogram bin on the interval [0,1].
'h.centres' - Z-series histogram bin centres.
'confmean' - Expected z-series histogram count, under the null
hypothesis of z-series being iid uniform on [0,1].
'confpos' - Upper confidence interval for the z-series histogram
counts, under the null hypothesis of z-series being iid
uniform on [0,1].
'confneg' - Lower confidence interval for the z-series histogram
counts, under the null hypothesis of z-series being iid
uniform on [0,1].
(cc) Max Little, 2008. This software is licensed under the
Attribution-Share Alike 2.5 Generic Creative Commons license:
http://creativecommons.org/licenses/by-sa/2.5/
If you use this work, please cite:
Little MA et al. (2008), Parsimonious Modeling of UK Daily Rainfall for
Density Forecasting, in Geophysical Research Abstracts, EGU General
Assembly, Vienna 2008, Volume 10.
"
function pspit(forecasts::Matrix, obs::Vector; zsignif::Float64 = 0.95, nbins::Int = 10, returnZ::Bool=false)
N = length(obs)
M, S = size(forecasts)
@assert M == N "Length of OBS must match number of rows in FORECASTS"
z = similar(obs)
freqs = (1. : S) ./ S
scale = similar(forecasts, S)
for i = 1:N
scale .= sort(forecasts[i,:])
@views obsI = obs[i]
if obsI < scale[1]
z[i] = 0.
elseif obsI > scale[end]
z[i] = 1.
else
idx = findlast(scale .<= obsI)
if idx > 1
z[i] = freqs[idx - 1] + rand(Uniform()) * (freqs[idx] - freqs[idx - 1])
else
z[i] = 0.
end
end
end
if returnZ
return z
end
# Obtaining a histogram
bins = LinRange(0., 1., nbins+1)
h = fit(Histogram, z, bins)
# Find the approximate confintv confidence interval for
# binomially-distributed PIT histogram bin counts, under
# the assumption of iid U(0,1) PIT z-series
bprob = 1. / nbins
meanheight = convert(Int, floor(N/nbins))::Int
binocumul(x) = cdf(Binomial(N,bprob), x)
for confwidth = 1:meanheight
cip = meanheight + confwidth
cin = meanheight - confwidth
ci = binocumul(cip) - binocumul(cin - 1.)
if ci >= zsignif
return (z=z, zhist=h, confmean= meanheight, confpos = cip, confneg = cin)
end
end
@warn "the confidence interval could not be compute due to few observation"
return (z=z, zhist=h, confmean= meanheight)
end
"PSCRPS Calculate Continuous-Ranked Probability Score.
CRPS = PSCRPS(FORECAST, OBS) obtains the CRPS for a given vector of
observations in OBS, with associated empirical probability forecasts
given in the matrix FORECASTS. Each row of FORECASTS contains an
empirical distribution for each element of OBS. Output parameter is the
CRPS value."
function pscrps(forecasts::Matrix, obs::Vector)
N = length(obs)
M, S = size(forecasts)
@assert M == N "Length of OBS must match number of rows in FORECASTS"
crps_individual = similar(obs)
for i = 1:N
crps1 = mean(abs.(forecasts[i, :] .- obs[i]))
crps2 = mean(abs.(diff(forecasts[i, randperm(S)])))
crps_individual[i] = crps1 - 0.5 * crps2
end
crps = mean(crps_individual)
return (crps = crps, crps_individual = crps_individual)
end |
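# A minimal sketch of both diagnostics on synthetic data (the Gaussian
# ensemble below is an arbitrary illustrative choice):
#
#   obs = randn(200)                        # observed series
#   forecasts = randn(200, 500)             # 500 ensemble draws per observation
#   res = pspit(forecasts, obs; nbins=10)   # PIT z-series + histogram summary
#   res.z, res.zhist                        # fields of the returned named tuple
#   crps = pscrps(forecasts, obs).crps      # mean CRPS over the series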
proofpile-julia0005-42482 | {
"provenance": "014.jsonl.gz:242483"
} | module FrostHelperBadelineChaserBlock
using ..Ahorn, Maple
@mapdef Entity "FrostHelper/BadelineChaserBlock" BadelineChaserBlock(x::Integer, y::Integer, width::Integer=Maple.defaultBlockWidth, height::Integer=Maple.defaultBlockWidth, reversed::Bool=false)
const placements = Ahorn.PlacementDict(
"Badeline Chaser Block (Frost Helper)" => Ahorn.EntityPlacement(
BadelineChaserBlock,
"rectangle",
)
)
Ahorn.minimumSize(entity::BadelineChaserBlock) = 16, 16
Ahorn.resizable(entity::BadelineChaserBlock) = true, true
function Ahorn.selection(entity::BadelineChaserBlock)
x, y = Ahorn.position(entity)
width = Int(get(entity.data, "width", 8))
height = Int(get(entity.data, "height", 8))
return Ahorn.Rectangle(x, y, width, height)
end
function getTextures(entity::BadelineChaserBlock)
return "objects/FrostHelper/badelineChaserBlock/solid", "objects/FrostHelper/badelineChaserBlock/emblemsolid"
end
function renderSwapBlock(ctx::Ahorn.Cairo.CairoContext, x::Number, y::Number, width::Number, height::Number, midResource::String, frame::String)
midSprite = Ahorn.getSprite(midResource, "Gameplay")
tilesWidth = div(width, 8)
tilesHeight = div(height, 8)
for i in 2:tilesWidth - 1
Ahorn.drawImage(ctx, frame, x + (i - 1) * 8, y, 8, 0, 8, 8)
Ahorn.drawImage(ctx, frame, x + (i - 1) * 8, y + height - 8, 8, 16, 8, 8)
end
for i in 2:tilesHeight - 1
Ahorn.drawImage(ctx, frame, x, y + (i - 1) * 8, 0, 8, 8, 8)
Ahorn.drawImage(ctx, frame, x + width - 8, y + (i - 1) * 8, 16, 8, 8, 8)
end
for i in 2:tilesWidth - 1, j in 2:tilesHeight - 1
Ahorn.drawImage(ctx, frame, x + (i - 1) * 8, y + (j - 1) * 8, 8, 8, 8, 8)
end
Ahorn.drawImage(ctx, frame, x, y, 0, 0, 8, 8)
Ahorn.drawImage(ctx, frame, x + width - 8, y, 16, 0, 8, 8)
Ahorn.drawImage(ctx, frame, x, y + height - 8, 0, 16, 8, 8)
Ahorn.drawImage(ctx, frame, x + width - 8, y + height - 8, 16, 16, 8, 8)
Ahorn.drawImage(ctx, midSprite, x + div(width - midSprite.width, 2), y + div(height - midSprite.height, 2))
end
function Ahorn.renderSelectedAbs(ctx::Ahorn.Cairo.CairoContext, entity::BadelineChaserBlock, room::Maple.Room)
startX, startY = Int(entity.data["x"]), Int(entity.data["y"])
width = Int(get(entity.data, "width", 32))
height = Int(get(entity.data, "height", 32))
frame, mid = getTextures(entity)
renderSwapBlock(ctx, startX, startY, width, height, mid, frame)
end
function Ahorn.renderAbs(ctx::Ahorn.Cairo.CairoContext, entity::BadelineChaserBlock, room::Maple.Room)
startX, startY = Int(entity.data["x"]), Int(entity.data["y"])
width = Int(get(entity.data, "width", 32))
height = Int(get(entity.data, "height", 32))
frame, mid = getTextures(entity)
renderSwapBlock(ctx, startX, startY, width, height, mid, frame)
end
end
|
proofpile-julia0005-42483 | {
"provenance": "014.jsonl.gz:242484"
} | ################################################################################
# Visuals
################################################################################
function visualize(env::Environment, traj::Vector{Vector{T}}) where T
@assert size(traj[1]) == size(env.state)
storage = generate_storage(env.mechanism, [env.representation == :minimal ? minimal_to_maximal(env.mechanism, x) : x for x in traj])
visualize(env.mechanism, storage,
vis=env.vis)
end
################################################################################
# Miscellaneous
################################################################################
type2symbol(H) = Symbol(lowercase(String(H.name.name)))
|
proofpile-julia0005-42484 | {
"provenance": "014.jsonl.gz:242485"
} | module TestWater
using Test
using LinearAlgebra
using Canopy.Constants: T_0
using Canopy.Water
rho_w_validat = hcat(
[0., 4., 5., 10., 15., 20., 25., 30.] .+ T_0,
[999.8395, 999.9720, 999.96, 999.7026, 999.1026,
998.2071, 997.0479, 995.6502])
pK_w_validat = hcat(
collect(0:25:100) .+ T_0,
[14.95, 13.99, 13.26, 12.70, 12.25])
@time @testset "Canopy.Water" begin
@test norm(water_density.(rho_w_validat[:, 1]) -
rho_w_validat[:, 2]) / norm(rho_w_validat[:, 2]) < 1e-4
@test norm(water_dissoc.(pK_w_validat[:, 1]) - pK_w_validat[:, 2]) /
norm(pK_w_validat[:, 2]) < 1e-3
end
end # module
|
proofpile-julia0005-42485 | {
"provenance": "014.jsonl.gz:242486"
} | using LinearAlgebra: eigen, cholesky, diag, diagm, UpperTriangular, LowerTriangular
using Statistics: mean
"""
Functions for mvrnorm, covriance to correlation conversion (cov2cor)
and vice versa cor2cov.
"""
function generateRandomMatrix(p::Int64, n::Int64)
x = rand(n, p)
return x' * x
end
"""
Function to convert covariance to correlation matrix
"""
function cov2cor(E::Array{T, 2}) where {T <: AbstractFloat}
p = size(E)[1]
d::Array{T, 1} = diag(E)
d .^= -0.5
C = (d * d') .* E
C = 0.5.*(UpperTriangular(C) + LowerTriangular(C)')
C = C + C'
for i in 1:p
C[i, i] = T(1)
end
return C
end
"""
Function to convert correlation to covariance matrix
d is the variance vector of the covariance matrix
"""
function cor2cov(C::Array{T, 2}, d::Array{T, 1}) where {T <: AbstractFloat}
p = size(C)[1]
d1 = d .^0.5
E = (d1 * d1') .* C
E = 0.5.*(UpperTriangular(E) + LowerTriangular(E)')
E = E + E'
for i in 1:p
E[i, i] /= 2
end
return E
end
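# A small round-trip sketch using the helpers above (illustrative only):
#
#   E  = generateRandomMatrix(4, 100)   # symmetric positive semidefinite
#   C  = cov2cor(E)                     # correlation matrix, unit diagonal
#   E2 = cor2cov(C, diag(E))            # rebuild covariance from C + variances
#   E2 ≈ E                              # holds up to floating-point error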
"""
Function that carries samples from a multivariate normal
distribution when supplied with the number of samples (n),
the mean vector (mu), and the covariance matrix (sigma).
Example:
mvrnorm(100, zeros(10), cov2cor(generateRandomMatrix(10, 10000)))
"""
function mvrnorm(n::Int64, mu::Array{T, 1}, sigma::Array{T, 2}) where {T <: AbstractFloat}
p = size(sigma)[1]
A = cholesky(sigma).L
output = zeros(T, (n, p))
for i = 1:n
output[i, :] = mu + A * randn(p)
end
return output
end
#=======================================================================================================#
"""
Random Number Generator for Beta (and Uniform) Distribution
"""
abstract type AbstractSampleDistribution end
struct BetaSampleDistribution{T} <: AbstractSampleDistribution
alpha::T
beta::T
function BetaSampleDistribution(alpha::T, beta::T) where {T <: AbstractFloat}
return new{T}(alpha, beta)
end
function BetaSampleDistribution(alpha::T, beta::T) where {T <: Integer}
return BetaSampleDistribution(Float64(alpha), Float64(beta))
end
end
function calcSample(ualpha::T, vbeta::T) where {T <: AbstractFloat}
return ualpha/(ualpha + vbeta)
end
function condition(ualpha::T, vbeta::T) where {T <: AbstractFloat}
return (ualpha + vbeta) > T(1)
end
"""
Version 1 of the random sample from beta distribution
Reference: C. P. Robert, G. Casella,
Monte Carlo Statistical Methods, Example 2.11 p44
"""
function sample(distrib::BetaSampleDistribution{T}, shape::Tuple{Vararg{Int64}}) where {T <: AbstractFloat}
n = prod(shape)
U = rand(T, n); V = rand(T, n)
Y::Array{T} = zeros(T, shape)
ialpha = 1/distrib.alpha; ibeta = 1/distrib.beta
for i in 1:n
u::T = U[i]; v::T = V[i]
ualpha = u .^ialpha; vbeta = v .^ibeta;
while condition(ualpha, vbeta)
u = rand(T, 1)[1]; v = rand(T, 1)[1]
ualpha = u .^ialpha; vbeta = v .^ibeta;
end
Y[i] = calcSample(ualpha, vbeta)
end
return Y
end
function sample(distrib::BetaSampleDistribution{T}, shape::Int64) where {T <: AbstractFloat}
return sample(distrib, (shape,))
end
#=======================================================================================================#
struct UniformSampleDistribution{T} <: AbstractSampleDistribution
min::T
max::T
function UniformSampleDistribution(min::T, max::T) where {T <: AbstractFloat}
@assert(min < max, "Minimum value is not less than maximum value")
return new{T}(min, max)
end
function UniformSampleDistribution(min::T, max::T) where {T <: Integer}
return UniformSampleDistribution(Float64(min), Float64(max))
end
end
function sample(distrib::UniformSampleDistribution{T}, shape::Tuple{Vararg{Int64}}) where {T <: AbstractFloat}
rsample::Array{T} = rand(T, shape)
return (rsample .* (distrib.max - distrib.min)) .+ distrib.min
end
function sample(distrib::UniformSampleDistribution{T}, shape::Int64) where {T <: AbstractFloat}
return sample(distrib, (shape,))
end
function Base.min(x::Array{T}) where {T <: AbstractFloat}
n = length(x)
@assert(n > 0, "Length of array is zero.")
if n == 1
return x[1]
end
ret::T = x[1]
for i in 2:n
ret = ret > x[i] ? x[i] : ret
end
return ret
end
function Base.max(x::Array{T}) where {T <: AbstractFloat}
n = length(x)
@assert(n > 0, "Length of array is zero.")
if n == 1
return x[1]
end
ret::T = x[1]
for i in 2:n
ret = ret < x[i] ? x[i] : ret
end
return ret
end
function Base.range(x::Array{T}) where {T <: AbstractFloat}
return [min(x), max(x)]
end
#=======================================================================================================#
"""
Identity Matrix
"""
function I(::Type{T}, p::Int64) where {T <: Number}
x = zeros(T, (p, p))
for i in 1:p
x[i, i] = T(1)
end
return x
end
"""
Convenience overload
"""
function I(p::Int64)
return I(Float64, p)
end
"""
Types for generating random correlation matrices
"""
abstract type AbstractRandomCorrelationMatrix end
struct BetaGenerator <: AbstractRandomCorrelationMatrix end
struct OnionGenerator <: AbstractRandomCorrelationMatrix end
struct UniformGenerator <: AbstractRandomCorrelationMatrix end
struct VineGenerator <: AbstractRandomCorrelationMatrix end
"""
Vine method for generating a random correlation matrix
Source:
https://stats.stackexchange.com/questions/2746/how-to-efficiently-generate-random-positive-semidefinite-correlation-matrices/125017#125017
Check it with to make sure the solutions are correct:
Journal of Multivariate Analysis 100 (2009) 1989–2001
Generating random correlation matrices based on vines and extended
onion method. Daniel Lewandowski(a)*, Dorota Kurowicka (a),
Harry Joe (b).
randomCorrelationMatrix(VineGenerator(), 10, 2.0)
"""
function randomCorrelationMatrix(::VineGenerator, d::Int64, eta::T) where {T <: AbstractFloat}
beta = eta + (d - 1)/2
P = zeros(T, (d, d))
S = I(T, d)
for k in 1:(d - 1)
beta = beta - (1/2)
distrib = BetaSampleDistribution(beta, beta)
for i in (k + 1):d
P[k, i] = sample(distrib, 1)[1]
P[k, i] = (P[k, i] - 0.5)*2
p = P[k, i]
for l in (k - 1):-1:1
p = p * sqrt((1 - P[l, i]^2)*(1 - P[l, k]^2)) + P[l, i]*P[l, k]
end
S[k, i] = p
S[i, k] = p
end
end
return S
end
"""
Random correlation matrix using the Onion Generator
"""
function randomCorrelationMatrix(::OnionGenerator, d::Int64, eta::T) where {T <: AbstractFloat}
beta::T = eta + (d - 2)/2
distrib = BetaSampleDistribution(beta, beta)
u = sample(distrib, 1)[1]
r = I(T, 2)
r[1, 2] = r[2, 1] = 2*u - 1
for k in 2:(d - 1)
beta -= T(1/2)
distrib = BetaSampleDistribution(T(k/2), beta)
y = sample(distrib, 1)[1]
U = rand(T, k)
w = sqrt(y) .* U
ev = eigen(r)
A = ev.vectors * diagm(abs.(ev.values).^(0.5)) * ev.vectors'
z = A * w
r = [r z; z' T(1)]
end
return r
end
"""
Random correlation matrix by sampling from Beta Distribution
# Example:
randomCorrelationMatrix(BetaGenerator(), 10, (1.0, 1.0))
"""
function randomCorrelationMatrix(::BetaGenerator, d::Int64, (alpha, beta)::Tuple{T, T}) where {T <: AbstractFloat}
distrib = BetaSampleDistribution(alpha, beta)
r = sample(distrib, (d, d))
# Change range to (-1, 1)
r .= (r .* 2) .+ T(-1)
# Symmetry
r .= T(0.5) .* (r + r')
for i in 1:d
r[i, i] = T(1)
end
ev = eigen(r)
r = ev.vectors * diagm(sort(abs.(ev.values))) * ev.vectors'
maxR = max(r)
r ./= maxR
r .= T(0.5) .* (r + r')
for i in 1:d
r[i, i] = T(1)
end
return r
end
"""
Random correlation matrix by sampling from Uniform Distribution
# Example:
randomCorrelationMatrix(Float64, UniformGenerator(), 10)
"""
function randomCorrelationMatrix(::Type{T}, ::UniformGenerator, d::Int64) where {T <: AbstractFloat}
distrib = UniformSampleDistribution(T(-1), T(1))
r = sample(distrib, (d, d))
r .= T(0.5) .* (r + r')
for i in 1:d
r[i, i] = T(1)
end
ev = eigen(r)
r = ev.vectors * diagm(sort(abs.(ev.values))) * ev.vectors'
maxR = max(abs.(r)...)
r ./= maxR
r .= T(0.5) .* (r + r')
for i in 1:d
r[i, i] = T(1)
end
return r
end
#=======================================================================================================#
"""
Function to simulate X and eta, p = number of parameters
(including intercept), n = number of samples.
# Example:
using Random: seed!
seed!(0)
simulateData(Float64, 10, 1000)
"""
function simulateData(::Type{T}, p::Int64, n::Int64, delta::T = T(0)) where {T <: AbstractFloat}
corr = randomCorrelationMatrix(T, UniformGenerator(), p)
mu = zeros(T, p)
X = mvrnorm(n, mu, corr)
#= The intercept =#
X[:, 1] .= T(1)
b = zeros(T, p)
idist = UniformSampleDistribution(T(0), T(0.3))
b[1] = sample(idist, 1)[1]
if length(b) > 1
distrib = UniformSampleDistribution(T(-0.1), T(0.1))
b[2:p] = sample(distrib, p - 1)
end
eta = X*b
sd = 0.5*abs(mean(eta))
eta = delta .+ eta .+ sd .* randn(n)
return (X = X, eta = eta)
end
|
proofpile-julia0005-42486 | {
"provenance": "014.jsonl.gz:242487"
} | using Submodular
using FactCheck
Tol = 1e-3
facts("Algorithms") do
context("card-inc-fix") do
n = 20
g = rand(n) # the vector used to generate the cardinality-based function
g = sort(g, rev=true)
g = (g + 0.1)/1.01
for i = 2: n
g[i] = g[i] + g[i-1]
end
S = SetVariable(n)
gg = card(g, S) # F(S) = g(|S|)
y = rand(n) # the point to be projected on the base polytope of F(S) = g(|S|)
euclidean_proj = card_inc_fix(gg, y, "euclidean")
# sanity checks:
# Is the sorted order of indices in y and the projection the same?
# this is a known property of projections under uniform divergences over cardinality-based polytopes
@fact sortperm(y) == sortperm(euclidean_proj[1:n]) --> true
# Is the constraint x(E) = F(E) = g(n) satisfied upto an error of Tol^3?
@fact sum(euclidean_proj) - g[n] --> roughly(0, Tol^3)
end
context("frank-wolfe with away steps") do
n = 4
x = Variable(n)
g = norm(x - [3, 2, 5, 1])
S = SetVariable(n)
F = card(S)
p(z) = -0.5*z^2 + n*z + 0.5 * z
perm_func = compose(p, F)
P = BasePoly(perm_func)
prob = SCOPEminimize(g, x in P)
@fact frank_wolfe_away(prob, verbose = false) --> roughly([3, 2, 4, 1], Tol)
end
end
|
proofpile-julia0005-42487 | {
"provenance": "014.jsonl.gz:242488"
} | # JuMP Objectives/constraints #
# ========================== #
function JuMP.normalized_coefficient(
con_ref::ConstraintRef{Model, JuMP._MOICON{F, S}}, decision
) where {S, T, F <: AffineDecisionFunction{T}}
con = JuMP.constraint_object(con_ref)
return JuMP._affine_coefficient(con.func, decision)
end
function JuMP.set_objective_coefficient(model::Model, decision_or_known::Union{DecisionRef, KnownRef}, coeff::Real)
if model.nlp_data !== nothing && _nlp_objective_function(model) !== nothing
error("A nonlinear objective is already set in the model")
end
obj_fct_type = objective_function_type(model)
if obj_fct_type == VariableRef || obj_fct_type == AffExpr || obj_fct_type == QuadExpr
current_obj = objective_function(model)
set_objective_function(model, add_to_expression!(coeff * decision_or_known, current_obj))
elseif obj_fct_type == typeof(decision_or_known)
current_obj = objective_function(model)
if index(current_obj) == index(decision_or_known)
set_objective_function(model, coeff * decision_or_known)
else
set_objective_function(model, add_to_expression!(coeff * decision_or_known, current_obj))
end
elseif obj_fct_type == DecisionAffExpr{Float64} && decision_or_known isa DecisionRef
MOI.modify(
backend(model),
MOI.ObjectiveFunction{moi_function_type(obj_fct_type)}(),
DecisionCoefficientChange(index(decision_or_known), coeff))
elseif obj_fct_type == DecisionAffExpr{Float64} && decision_or_known isa KnownRef
MOI.modify(
backend(model),
MOI.ObjectiveFunction{moi_function_type(obj_fct_type)}(),
KnownCoefficientChange(index(decision_or_known), coeff, value(decision_or_known)))
else
error("Objective function type not supported: $(obj_fct_type)")
end
return nothing
end
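# Hedged usage sketch (`model` and `x` are hypothetical; assumes a JuMP Model
# whose objective already contains the decision `x`):
# @objective(model, Min, 2x + 1)
# set_objective_coefficient(model, x, 5.0)   # objective becomes 5x + 1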
function JuMP.normalized_rhs(con_ref::ConstraintRef{Model, JuMP._MOICON{F, S}}) where {
T, S <: Union{MOI.LessThan{T}, MOI.GreaterThan{T}, MOI.EqualTo{T}},
F <: AffineDecisionFunction{T}}
con = constraint_object(con_ref)
return MOI.constant(con.set)
end
function JuMP.set_normalized_coefficient(
con_ref::ConstraintRef{Model, JuMP._MOICON{F, S}}, variable::VariableRef, coeff
) where {S, T, F <: AffineDecisionFunction{T}}
MOI.modify(backend(owner_model(con_ref)), index(con_ref),
MOI.ScalarCoefficientChange(index(variable), convert(T, coeff)))
return nothing
end
function JuMP.set_normalized_coefficient(
con_ref::ConstraintRef{Model, JuMP._MOICON{F, S}}, decision::DecisionRef, coeff
) where {S, T, F <: AffineDecisionFunction{T}}
MOI.modify(backend(owner_model(con_ref)), index(con_ref),
DecisionCoefficientChange(index(decision), convert(T, coeff)))
return nothing
end
function JuMP.set_normalized_coefficient(
con_ref::ConstraintRef{Model, JuMP._MOICON{F, S}}, known::KnownRef, coeff
) where {S, T, F <: AffineDecisionFunction{T}}
MOI.modify(backend(owner_model(con_ref)), index(con_ref),
KnownCoefficientChange(index(known), convert(T, coeff), convert(T, value(known))))
return nothing
end
# Internal update functions #
# ========================== #
function update_decisions!(model::JuMP.Model, change::Union{DecisionModification, KnownModification})
update_decision_objective!(model, objective_function_type(model), change)
update_decision_variable_constraints!(model, change)
update_decision_constraints!(model, change)
end
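# The three calls above fan a single modification out to the objective, the
# single-decision variable constraints, and the affine/quadratic constraints;
# each layer no-ops when its function type carries no decisions. Hedged sketch
# of the entry point, reusing only constructors that appear in this file:
# update_decisions!(model, DecisionStateChange(index(dref), state(dref), 0.0))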
function update_decision_objective!(::JuMP.Model, ::DataType, ::Union{DecisionModification, KnownModification})
# Nothing to do if objective does not have decisions
return nothing
end
function update_decision_objective!(model::JuMP.Model, func_type::Type{<:DecisionQuadExpr}, change::DecisionModification)
MOI.modify(backend(model),
MOI.ObjectiveFunction{JuMP.moi_function_type(func_type)}(),
change)
return nothing
end
function update_decision_objective!(model::JuMP.Model, func_type::Type{F}, change::KnownModification) where F <: Union{DecisionAffExpr, DecisionQuadExpr}
MOI.modify(backend(model),
MOI.ObjectiveFunction{JuMP.moi_function_type(func_type)}(),
change)
return nothing
end
function update_decision_variable_constraints!(::JuMP.Model, ::Union{DecisionModification, KnownModification})
# Nothing to do in most cases
return nothing
end
function update_decision_variable_constraints!(model::JuMP.Model, change::DecisionStateChange)
for F in [DecisionRef, ]
for S in [MOI.EqualTo{Float64}, MOI.LessThan{Float64}, MOI.GreaterThan{Float64}, FreeDecision]
for cref in all_constraints(model, F, S)
update_decision_constraint!(cref, change)
end
end
end
return nothing
end
function update_decision_variable_constraints!(model::JuMP.Model, ::DecisionsStateChange)
for F in [DecisionRef, ]
for S in [MOI.EqualTo{Float64}, MOI.LessThan{Float64}, MOI.GreaterThan{Float64}, FreeDecision]
for cref in all_constraints(model, F, S)
# Fetch the decision
dref = JuMP.jump_function(model, MOI.get(model, MOI.ConstraintFunction(), cref))
# Perform specific decision state change
change = DecisionStateChange(index(dref), state(dref), 0.0)
update_decision_constraint!(cref, change)
end
end
end
return nothing
end
function update_decision_constraints!(model::JuMP.Model, change::Union{DecisionModification, KnownModification})
for F in [DecisionAffExpr{Float64}, DecisionQuadExpr{Float64}]
for S in [MOI.EqualTo{Float64}, MOI.LessThan{Float64}, MOI.GreaterThan{Float64}]
for cref in all_constraints(model, F, S)
update_decision_constraint!(cref, change)
end
end
end
for F in [Vector{DecisionAffExpr{Float64}}]
for S in [MOI.Zeros, MOI.Nonnegatives, MOI.Nonpositives]
for cref in all_constraints(model, F, S)
update_decision_constraint!(cref, change)
end
end
end
return nothing
end
function update_decision_constraint!(cref::ConstraintRef, change::Union{DecisionModification, KnownModification})
update_decision_constraint!(backend(owner_model(cref)), cref.index, change)
return nothing
end
function update_decision_constraint!(model::MOI.ModelLike, ci::MOI.ConstraintIndex{SingleDecision, S}, change::DecisionModification) where S
MOI.modify(model, ci, change)
return nothing
end
function update_decision_constraint!(model::MOI.ModelLike, ci::MOI.ConstraintIndex{VectorOfDecisions, S}, change::VectorDecisionModification) where S
MOI.modify(model, ci, change)
return nothing
end
function update_decision_constraint!(model::MOI.ModelLike, ci::MOI.ConstraintIndex{AffineDecisionFunction{T}, S}, change::Union{DecisionCoefficientChange, KnownModification}) where {T,S}
MOI.modify(model, ci, change)
return nothing
end
function update_decision_constraint!(model::MOI.ModelLike, ci::MOI.ConstraintIndex{AffineDecisionFunction{T}, S}, change::Union{DecisionStateChange, DecisionsStateChange}) where {T,S}
return nothing
end
function update_decision_constraint!(model::MOI.ModelLike, ci::MOI.ConstraintIndex{VectorAffineDecisionFunction{T}, S}, change::Union{DecisionMultirowChange, KnownModification}) where {T,S}
MOI.modify(model, ci, change)
return nothing
end
function update_decision_constraint!(model::MOI.ModelLike, ci::MOI.ConstraintIndex{VectorAffineDecisionFunction{T}, S}, change::Union{DecisionStateChange, DecisionsStateChange}) where {T,S}
return nothing
end
function update_decision_constraint!(model::MOI.ModelLike, ci::MOI.ConstraintIndex{<:QuadraticDecisionFunction{T}, S}, change::Union{DecisionModification, KnownModification}) where {T,S}
MOI.modify(model, ci, change)
return nothing
end
|
proofpile-julia0005-42488 | {
"provenance": "014.jsonl.gz:242489"
} | module Distance
include("dist.jl")
export
# Function
GetDistance,
# Types of distances
Euclidean,
CityBlock,
TotalVariation,
Chebyshev,
Jaccard,
BrayCurtis,
CosineDist,
SpanNormDist
end
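# Hedged usage sketch (assumes `dist.jl` defines `GetDistance(metric, x, y)` for
# the exported metric types; the exact signature is not shown in this file):
# using .Distance
# GetDistance(Euclidean(), [0.0, 0.0], [3.0, 4.0])   # expected: 5.0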
|
proofpile-julia0005-42489 | {
"provenance": "014.jsonl.gz:242490"
} | using FromFile
using Random: randperm
using LossFunctions
@from "Core.jl" import Options, Dataset, Node
@from "EquationUtils.jl" import countNodes
@from "EvaluateEquation.jl" import evalTreeArray, differentiableEvalTreeArray
function Loss(x::AbstractArray{T}, y::AbstractArray{T}, options::Options{A,B,C})::T where {T<:Real,A,B,C<:SupervisedLoss}
value(options.loss, y, x, AggMode.Mean())
end
function Loss(x::AbstractArray{T}, y::AbstractArray{T}, options::Options{A,B,C})::T where {T<:Real,A,B,C<:Function}
sum(options.loss.(x, y))/length(y)
end
function Loss(x::AbstractArray{T}, y::AbstractArray{T}, w::AbstractArray{T}, options::Options{A,B,C})::T where {T<:Real,A,B,C<:SupervisedLoss}
value(options.loss, y, x, AggMode.WeightedMean(w))
end
function Loss(x::AbstractArray{T}, y::AbstractArray{T}, w::AbstractArray{T}, options::Options{A,B,C})::T where {T<:Real,A,B,C<:Function}
sum(options.loss.(x, y, w))/sum(w)
end
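# Hedged mini-example of the two dispatch branches above (assumes
# `using LossFunctions`): with an L2 loss, the SupervisedLoss branch matches a
# hand-written mean of elementwise losses:
# x = [1.0, 2.0]; y = [1.5, 1.5]
# value(L2DistLoss(), y, x, AggMode.Mean()) ≈ sum(abs2, x .- y) / length(y)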
# Evaluate a tree's loss over the dataset; the loss itself comes from
# `options.loss`, either a LossFunctions.SupervisedLoss or a plain Julia function.
function EvalLoss(tree::Node, dataset::Dataset{T}, options::Options;
allow_diff=false)::T where {T<:Real}
if !allow_diff
(prediction, completion) = evalTreeArray(tree, dataset.X, options)
else
(prediction, completion) = differentiableEvalTreeArray(tree, dataset.X, options)
end
if !completion
return T(1000000000)
end
if dataset.weighted
return Loss(prediction, dataset.y, dataset.weights, options)
else
return Loss(prediction, dataset.y, options)
end
end
# Score an equation
function scoreFunc(dataset::Dataset{T},
baseline::T, tree::Node,
options::Options; allow_diff=false)::T where {T<:Real}
mse = EvalLoss(tree, dataset, options; allow_diff=allow_diff)
return mse / baseline + countNodes(tree)*options.parsimony
end
# Score an equation with a small batch
function scoreFuncBatch(dataset::Dataset{T}, baseline::T,
tree::Node, options::Options)::T where {T<:Real}
batchSize = options.batchSize
batch_idx = randperm(dataset.n)[1:batchSize]
batch_X = dataset.X[:, batch_idx]
batch_y = dataset.y[batch_idx]
(prediction, completion) = evalTreeArray(tree, batch_X, options)
if !completion
return T(1000000000)
end
if !dataset.weighted
mse = Loss(prediction, batch_y, options)
else
batch_w = dataset.weights[batch_idx]
mse = Loss(prediction, batch_y, batch_w, options)
end
return mse / baseline + countNodes(tree) * options.parsimony
end
|
proofpile-julia0005-42490 | {
"provenance": "014.jsonl.gz:242491"
} | ENV["GKSwstype"]="100"
using Test
using WaterWaves1D
include("./testmodels.jl")
# @testset "LoadSave" begin
#
# dump = convert( ProblemSave, problem1 )
# pload = convert( Problem, dump )
#
# save(problem1, "testsave")
# pload = loadpb("testsave")
#
# @test pload.model.kwargs == problem1.model.kwargs
# @test pload.solver.Uhat == problem1.solver.Uhat
#
# end
param = ( ϵ = 1/2, μ = 1)
paramX= ( N = 2^8, L = 10)
paramT= ( T = 5, dt = 0.1)
@testset "Parameters" begin
@test param.ϵ == 0.5
@test param.μ == 1
@test paramX.N == 256
@test paramX.L == 10
@test paramT.T == 5
@test paramT.dt == 0.1
end
|
proofpile-julia0005-42491 | {
"provenance": "014.jsonl.gz:242492"
} |
function calcJacobian!(rr::RadauIntegrator{T_object,NX,NC,NR,NSM}, x0::Vector{Float64}, t::Float64) where {T_object,NX,NC,NR,NSM}
N_Loop, n_rem = divrem(NX, NC)
(n_rem != 0) && (N_Loop += 1)
i_now = 1:NC
for k = 1:N_Loop
i_clamp = i_now[1]:min(i_now[end], NX)
seed_indices!(rr.cv.x_dual, x0, i_clamp, rr.cv.seed)
rr.de_object.de(rr.cv.xx_dual, rr.cv.x_dual, rr.de_object, t)
write_indices!(rr, i_clamp)
i_now = i_now .+ NC
end
return nothing
end
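# calcJacobian! fills the Jacobian in chunks of NC columns: ceil(NX/NC) forward
# passes, each seeding NC dual partials. Hedged sketch of the chunk arithmetic:
# NX, NC = 10, 4
# n_loop, n_rem = divrem(NX, NC); n_rem != 0 && (n_loop += 1)   # 3 passes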
function seed_indices!(duals::Vector{ForwardDiff.Dual{T,V,N}}, x::Vector{Float64}, index::UnitRange{Int64},
seed::NTuple{N,ForwardDiff.Partials{N,V}}) where {T,V,N}
duals .= x
i = 1
for k = index
duals[k] = ForwardDiff.Dual{T,V,N}(x[k], seed[i])
i += 1
end
return nothing
end
function write_indices!(rr::RadauIntegrator{T_object,NX,NC,NR,NSM}, index::UnitRange{Int64}) where {T_object,NX,NC,NR,NSM}
for i = 1:NX
xx_dual_i = rr.cv.xx_dual[i]
rr.cv.xx_0[i] = ForwardDiff.value(xx_dual_i)
the_partials = ForwardDiff.partials(xx_dual_i)
k = 1
for j = index # for each x
rr.cv.neg_J[i, j] = -the_partials[k] ### dual i carries the partials of output i w.r.t. the seeded inputs ###
k += 1
end
end
return nothing
end
function zeroFill!(tup_vec_in::NTuple{N,Vector{T}}, table::RadauTable{NS}) where {N,T,NS}
for i = 1:NS
fill!(tup_vec_in[i], zero(T))
end
return nothing
end
function initialize_X!(rr::RadauIntegrator{T_object,NX,NC,NR,NSM}, table::RadauTable{NS}, x0::Vector{Float64}) where {T_object,NX,NC,NR,NSM,NS}
if rr.dense.is_has_X
initialize_X_with_interp!(rr, table)
else
initialize_X_with_X0!(rr, table, x0)
end
end
function initialize_X_with_X0!(rr::RadauIntegrator{T_object,NX,NC,NR,NSM}, table::RadauTable{NS}, x0::Vector{Float64}) where {T_object,NX,NC,NR,NSM,NS}
for i = 1:NS
LinearAlgebra.BLAS.blascopy!(NX, x0, 1, rr.ct.X_stage[i], 1)
end
return nothing
end
function updateFX!(rr::RadauIntegrator{T_object,NX,NC,NR,NSM}, table::RadauTable{NS}, x0::Vector{Float64}, t::Float64) where {T_object,NX,NC,NR,NSM,NS}
for i = 1:NS
time_stage = table.c[i] * rr.step.h + t
rr.de_object.de(rr.ct.F_X_stage[i], rr.ct.X_stage[i], rr.de_object, time_stage)
end
return nothing
end
function calcEw!(rr::RadauIntegrator{T_object,NX,NC,NR,NSM}, table::RadauTable{NS}, x0::Vector{Float64}) where {T_object,NX,NC,NR,NSM,NS}
residual = 0.0
for i = 1:NS
# rr.cv.store_float .= rr.ct.X_stage[i] - x0 - rr.step.h * sum( rr.A[i, j] * rr.ct.F_X_stage[j])
LinearAlgebra.BLAS.blascopy!(NX, rr.ct.X_stage[i], 1, rr.cv.store_float, 1)
LinearAlgebra.BLAS.axpy!(-1.0, x0, rr.cv.store_float)
for j = 1:NS
coefficient = -rr.step.h * table.A[i, j]
# rr.cv.store_float .-= (rr.h * rr.A[i, j]) * rr.ct.F_X_stage[j]
LinearAlgebra.BLAS.axpy!(coefficient, rr.ct.F_X_stage[j], rr.cv.store_float)
end
residual += dot(rr.cv.store_float, rr.cv.store_float)
for j = 1:NS
# rr.ct.Ew_stage[j] .+= (rr.step.h⁻¹ * rr.λ[j] * rr.T⁻¹[j, i]) * rr.cv.store_float
coefficient = rr.step.h⁻¹[1] * table.λ[j] * table.T⁻¹[j, i]
LinearAlgebra.BLAS.axpy!(coefficient, rr.cv.store_float, rr.ct.Ew_stage[j])
end
end
return residual
end
function updateInvC!(rr::RadauIntegrator{T_object,NX,NC,NR,NSM}, table::RadauTable{NS}) where {T_object,NX,NC,NR,NSM,NS}
for i = 1:NS
rr.ct.inv_C_stage[i] .= rr.cv.neg_J
for k = 1:NX
rr.ct.inv_C_stage[i][k, k] += rr.step.h⁻¹[1] * table.λ[i]
end
### NOTE: Negative sign taken care of when update X_stage ###
_, ipiv, info = LinearAlgebra.LAPACK.getrf!(rr.ct.inv_C_stage[i]) # rr.cv.store_complex .= - inv(rr.cv.C) * rr.ct.Ew_stage[i]
LinearAlgebra.LAPACK.getri!(rr.ct.inv_C_stage[i], ipiv)
end
return nothing
end
function updateStageX!(rr::RadauIntegrator{T_object,NX,NC,NR,NSM}, table::RadauTable{NS}) where {T_object,NX,NC,NR,NSM,NS}
for i = 1:NS
### NOTE: Negative sign taken care of when update X_stage ###
# rr.cv.store_complex .= rr.ct.inv_C_stage[i] * rr.ct.Ew_stage[i]
LinearAlgebra.BLAS.gemv!('N', one(ComplexF64), rr.ct.inv_C_stage[i], rr.ct.Ew_stage[i], zero(ComplexF64), rr.cv.store_complex)
for j = 1:NS
# rr.ct.delta_Z_stage[j] .+= rr.T[j, i] * rr.cv.store_complex
LinearAlgebra.BLAS.axpy!(table.T[j, i], rr.cv.store_complex, rr.ct.delta_Z_stage[j])
end
end
for i = 1:NS # Update X_stage
# LinearAlgebra.BLAS.axpy!(1.0, real.(rr.ct.delta_Z_stage[i]), rr.ct.X_stage[i])
rr.ct.X_stage[i] .-= real.(rr.ct.delta_Z_stage[i])
end
return nothing
end
|
proofpile-julia0005-42492 | {
"provenance": "014.jsonl.gz:242493"
} | using LinearAlgebra
using SparseArrays
function degree_selector(t, M, U, p)
C = ceil.(abs.(t)*M)'*U
C = zero_to_inf.(C)
# idx is a CartesianIndex if C' is a Matrix, or a scalar if C' is a row
# vector. idx[1] extracts the first index, i.e. the row, of the CartesianIndex
cost, idx = findmin(C')
m = idx[1]
if cost == Inf
cost = 0
end
s = max(cost/m,1);
return (m, s)
end
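# Hedged usage sketch (shapes only: `M` is a column vector with one row per row
# of `U`; the values below are illustrative, not from the original caller):
# m, s = degree_selector(1.0, ones(5, 1), rand(5, 8), 1)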
function zero_to_inf(x::Number)
if x == 0.
Inf
else
x
end
end
|
proofpile-julia0005-42493 | {
"provenance": "014.jsonl.gz:242494"
} | ### A Pluto.jl notebook ###
# v0.12.21
using Markdown
using InteractiveUtils
# ╔═╡ 0abe9cd0-7a26-11eb-0d6d-f986ed77c036
using SpecialFunctions, HypergeometricFunctions, StaticArrays
# ╔═╡ c009caa0-7a27-11eb-28ef-bb4011cdcd50
using ClenshawCurtisBessel
# ╔═╡ 2176cb22-7a63-11eb-0e79-377be1de3061
using DoubleFloats
# ╔═╡ d311f31a-7a5f-11eb-3f9b-c3fd0c2655da
using LinearAlgebra
# ╔═╡ 386fbd2c-7a2a-11eb-051b-13b127ecfc30
struct BrandersPiessensProblem{T,N,M} <: ClenshawCurtisBessel.OliverProblem{T,N,M}
a::T
ν::T
end
# ╔═╡ bc00d5ee-7a5d-11eb-2eb5-5b938e7df938
md"""
Computes the modifed moments from Branders & Piessens 1987.
```math
\begin{aligned}
\frac{a^2}{16} &M_{k}(a, \nu) + \left[ (k+1)^2 - \nu^2 - \frac{a^2}{4} \right] M_{k + 2}(a, \nu) + \left[ 4 \nu^2 - 2(k+4) + 4 \right] M_{k+3}(a,\nu) \\
&- \left[ 2(k+4)^2 - 6 + 6\nu^2 - \frac{3a^2}{8} \right] M_{k+4}(a,\nu) + (4\nu^2 + 2(k+4) + 4)M_{k+5}(a,\nu) \\
&+ \left[ (k+7)^2 - \nu^2 - \frac{a^2}{4} \right]M_{k+6} + \frac{a^2}{16} M_{k+8}(a,\nu) \quad = 0
\end{aligned}
```
"""
# ╔═╡ d9adcfb6-7a4e-11eb-2ada-fbdffca3d2bc
function ClenshawCurtisBessel.OliverP(
OP::BrandersPiessensProblem{T}, s, k) where T
a, ν = OP.a, OP.ν
if s == 8
return a^2/16
elseif s == 7
return zero(T)
elseif s == 6
return (k+1)^2 - ν^2 - a^2/4
elseif s == 5
return 4ν^2 - 2(k+4) + 4
elseif s == 4
return -(2 * (k+4)^2 - 6 + 6ν^2 - 3 * a^2 / 8)
elseif s == 3
return 4ν^2 + 2(k+4) + 4
elseif s == 2
return (k+7)^2 - ν^2 - a^2/4
elseif s == 1
return zero(T)
elseif s == 0
return a^2/16
end
end
# ╔═╡ 29239542-7a56-11eb-152d-b386e0588444
"""
OliverR(OP::BrandersPiessensProblem{T}, i)
Computes ``R(i)``, the right side of the recurrence relation.
"""
ClenshawCurtisBessel.OliverR(
OP::BrandersPiessensProblem{T}, i) where T = zero(T)
# ╔═╡ dbf33dbe-7a54-11eb-0410-9b91fb2d22c8
import ClenshawCurtisBessel: M₀, M₁, M₂, M₃, Mk_asymptotic
# ╔═╡ 97550070-7a54-11eb-3df8-6ff173e946fc
function generate_BC(OP::BrandersPiessensProblem{T,8,2}, maxorder) where T
a, ν = OP.a, OP.ν
MBC = Dict{Int,T}()
MBC[-3] = M₃(a, ν)
MBC[-2] = M₂(a, ν)
MBC[-1] = M₁(a, ν)
MBC[ 0] = M₀(a, ν)
MBC[ 1] = M₁(a, ν)
MBC[ 2] = M₂(a, ν)
MBC[ 3] = M₃(a, ν)
# Branders & Piessens call the max moment N, we call it maxorder
ℓ = max(maxorder, Int(ceil(a + 10)))
print(typeof(a), " ", a, " ", ν, " ", ℓ, "\n")
MBC[ℓ] = Mk_asymptotic(a, ν, ℓ)
MBC[ℓ+1] = Mk_asymptotic(a, ν, ℓ+1)
return ℓ+1, MBC
end
# ╔═╡ 9740e914-7a54-11eb-1a68-0f136447f214
import ClenshawCurtisBessel: assembleP, assembleρ
# using BandedMatrices
# ╔═╡ 3be2f3f0-7a63-11eb-248f-7727012b9b60
HypergeometricFunctions.logabsgamma(x::Double64) =
DoubleFloats.loggamma(abs(x)), sign(gamma(x))
# ╔═╡ 6bbd6c44-7a70-11eb-3859-bd57ba21c45d
# SpecialFunctions.besselj(x::Double64, y::AbstractFloat) =
# SpecialFunctions.besselj(Float64(x), Float64(x))
# ╔═╡ 972d5cdc-7a54-11eb-0619-85688bb73fcf
@time begin
bpp = BrandersPiessensProblem{DoubleFloat,8,2}(10.0, 3.5)
indexBC, MBC = generate_BC(bpp, 200)
ρ = assembleρ(bpp, -3, indexBC, MBC)
𝐏 = assembleP(bpp, -3, indexBC)
sol = qr(𝐏) \ ρ
end
# ╔═╡ ae66d5ca-7a58-11eb-3001-bf4a310c41d4
begin
ref = big"0.0002069511037367724863632484164263304887654"
(sol[11] .- ref) ./ ref
end
# ╔═╡ Cell order:
# ╠═0abe9cd0-7a26-11eb-0d6d-f986ed77c036
# ╠═c009caa0-7a27-11eb-28ef-bb4011cdcd50
# ╠═386fbd2c-7a2a-11eb-051b-13b127ecfc30
# ╟─bc00d5ee-7a5d-11eb-2eb5-5b938e7df938
# ╠═d9adcfb6-7a4e-11eb-2ada-fbdffca3d2bc
# ╠═29239542-7a56-11eb-152d-b386e0588444
# ╠═dbf33dbe-7a54-11eb-0410-9b91fb2d22c8
# ╠═97550070-7a54-11eb-3df8-6ff173e946fc
# ╠═9740e914-7a54-11eb-1a68-0f136447f214
# ╠═2176cb22-7a63-11eb-0e79-377be1de3061
# ╠═d311f31a-7a5f-11eb-3f9b-c3fd0c2655da
# ╠═3be2f3f0-7a63-11eb-248f-7727012b9b60
# ╠═6bbd6c44-7a70-11eb-3859-bd57ba21c45d
# ╠═972d5cdc-7a54-11eb-0619-85688bb73fcf
# ╠═ae66d5ca-7a58-11eb-3001-bf4a310c41d4
|
proofpile-julia0005-42494 | {
"provenance": "014.jsonl.gz:242495"
} | module Syntax
export @traits, @traits_show_implementation
using ExprParsers
import WhereTraits
using WhereTraits: CONFIG
using WhereTraits.Utils
using WhereTraits.InternalState
using Suppressor
include("Lowering.jl")
using .Lowering
include("Parsing.jl")
using .Parsing
include("Rendering.jl")
using .Rendering
"""
@traits f(a, b) where {!isempty(a), !isempty(b)} = (a[1], b[1])
"""
macro traits(expr_original)
expr_expanded = macroexpand(__module__, expr_original)
expr_traits = _traits(@MacroEnv, expr_expanded, expr_original)
expr_traits = esc(expr_traits)
if CONFIG.suppress_on_traits_definitions
expr_traits = :(@suppress $expr_traits)
end
expr_traits
end
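# Hedged usage sketch (mirrors the docstring above and the WhereTraits README):
# @traits g(x) where {iseven(x)} = "even"
# @traits g(x) where {!iseven(x)} = "odd"
# g(2) == "even" && g(3) == "odd"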
function _traits(env, expr_expanded::Expr, expr_original::Expr)
parser = EP.AnyOf(EP.Function(), EP.anything)
_traits_parsed(env, parse_expr(parser, expr_expanded), expr_original)
end
function _traits_parsed(env, func_parsed::EP.Function_Parsed, expr_original::Expr)
basefunc, lowerings = lower_args_default(func_parsed)
basefunc_outer, basefunc_inner = parse_traitsfunction(env, basefunc, expr_original)
exprs = merge_and_render_update(env, basefunc_outer, basefunc_inner, doc = true)
for lowering in lowerings
# As lowering dropped variables, also traits may need to be dropped. Do this silently.
lowered_outer, lowered_inner = parse_traitsfunction(env, lowering, expr_original, on_traits_dropped = msg -> nothing)
# we don't document lowerings
lowered_exprs = merge_and_render_update(env, lowered_outer, lowered_inner, doc = false)
append!(exprs, lowered_exprs)
end
# return nothing in order to not return implementation detail
flatten_blocks(Expr(:block, exprs..., nothing))
end
function _traits_parsed(env, parsed, expr_original)
throw(ArgumentError("@traits macro expects function expression, got `$expr_original`"))
end
end # module
|
proofpile-julia0005-42495 | {
"provenance": "014.jsonl.gz:242496"
} | using AbnormalReturns
using Documenter
Documenter.makedocs(
modules = [AbnormalReturns],
sitename = "AbnormalReturns.jl",
pages = [
"Introduction" => "index.md",
"Example" => "example.md",
"API" => "api.md"
]
)
deploydocs(
repo = "github.com/junder873/AbnormalReturns.jl.git",
target = "build",
) |
proofpile-julia0005-42496 | {
"provenance": "014.jsonl.gz:242497"
} | # phonopy_project.jl
# Demonstration / scratchpad for using JuliaPhonons module
push!(LOAD_PATH,"../src") # Temporary versions of modules in PWD
using JuliaPhonons
# The REPO comes with a basic MAPI cubic unit cell Phonopy calculation
P=read_POSCAR(open("POSCAR"),expansion=[2,2,2])
eigenvectors,eigenmodes=read_meshyaml(open("mesh.yaml"),P)
myf=open("mode_decomposition.dat","w")
gnuplot_header(P,f=myf)
for (count,(eigenvector,eigenmode)) in enumerate(zip(eigenvectors,eigenmodes))
output_animated_xyz(P,count,eigenvector,eigenmode) # generates files anim_{count}.xyz
decompose_eigenmode_atomtype(P,count,eigenvector,eigenmode)
# Output again, to a file for later printing
decompose_eigenmode_atomtype(P,count,eigenvector,eigenmode,f=myf)
decompose_eigenmode_atom_contributions(P,count,eigenvector)
end
close(myf)
|
proofpile-julia0005-42497 | {
"provenance": "014.jsonl.gz:242498"
} |
using SafeTestsets
@safetestset "Probability Objects" begin
using InformationGeometry, Test, LinearAlgebra, Distributions
DS = DataSet([0,0.5,1,1.5],[1.,3.,7.,8.1],[1.2,2.,0.6,1.])
model(x,θ) = θ[1] * x + θ[2]
DM = DataModel(DS,model)
p = rand(2)
@test IsLinear(DM)
Dist = DataDist(ydata(DM),ysigma(DM))
@test abs(loglikelihood(DM,p) - logpdf(Dist,EmbeddingMap(DM,p))) < 1e-13
@test Score(DM,p) ≈ transpose(EmbeddingMatrix(DM,p)) * gradlogpdf(Dist,EmbeddingMap(DM,p))
@test FisherMetric(DM,p) ≈ transpose(EmbeddingMatrix(DM,p)) * inv(cov(Dist)) * EmbeddingMatrix(DM,p)
# Test AD vs manual derivative
@test norm(AutoScore(DM,p) - Score(DM,p)) < 2e-13
@test norm(AutoMetric(DM,p) .- FisherMetric(DM,p), 1) < 2e-9
# Do these tests in higher dimensions, check that OrthVF(PL) IsOnPlane....
# @test OrthVF(DM,XYPlane,p) == OrthVF(DM,p)
@test dot(OrthVF(DM,p),Score(DM,p)) < 1e-14
@test norm(FindMLE(DM) - [5.01511545953636, 1.4629658803705]) < 5e-10
end
@safetestset "Confidence Regions" begin
using InformationGeometry, Test, Plots
DS = DataSet([0,0.5,1,1.5],[1.,3.,7.,8.1],[1.2,2.,0.6,1.])
model(x,θ) = θ[1] * x + θ[2]; DM = DataModel(DS,model)
DME = DataModel(DataSetExact([0,0.5,1,1.5],0.1ones(4),[1.,3.,7.,8.1],[1.2,2.,0.6,1.]), model)
sols = ConfidenceRegions(DM,1:2; tol=1e-6)
@test IsStructurallyIdentifiable(DM,sols[1]) == true
@test size(SaveConfidence(sols,50)) == (50,4)
@test size(SaveGeodesics(sols,50)) == (50,2)
@test size(SaveDataSet(DM)) == (4,3)
@test ConfidenceRegionVolume(DM,sols[1];N=5000) < ConfidenceRegionVolume(DM,sols[2];N=5000,WE=true)
@test size(ConfidenceBands(DM,sols[1]; N=50, plot=false)) == (50,3)
@test size(PlotMatrix(inv(FisherMetric(DM,MLE(DM))),MLE(DM); N=50,plot=false)) == (50,2)
@test typeof(FittedPlot(DM)) <: Plots.Plot
@test typeof(FittedPlot(DME)) <: Plots.Plot
@test typeof(ResidualPlot(DM)) <: Plots.Plot
@test typeof(VisualizeGeos([MBAM(DM)])) <: Plots.Plot
simplermodel(x,p) = p[1]*x; DMSimp = DataModel(DS,simplermodel)
@test length(ConfidenceRegion(DMSimp,1.)) == 2
@test ModelComparison(DM,DMSimp)[2] > 0.
@test FindFBoundary(DM,1)[1] - FindConfBoundary(DM,1)[1] > 0
end
@safetestset "More Boundary tests" begin
using InformationGeometry, Test, Random, Distributions, OrdinaryDiffEq, LinearAlgebra
Random.seed!(31415); normerr(sig::Number) = rand(Normal(0,sig)); quarticlin(x,p) = p[1]*x.^4 .+ p[2]
X = collect(0:0.2:3); err = 2. .+ 2sqrt.(X); Y = quarticlin(X,[1,8.]) + normerr.(err)
ToyDME = DataModel(DataSetExact(X,0.1ones(length(X)),Y,err), (x,p) -> 15p[1]^3 * x.^4 .+ p[2]^5)
@test InterruptedConfidenceRegion(BigFloat(ToyDME), 8.5; tol=1e-5) isa ODESolution
NewX, NewP = TotalLeastSquares(ToyDME)
@test LogLike(Data(ToyDME), NewX, EmbeddingMap(Data(ToyDME),Predictor(ToyDME),NewP,NewX)) > loglikelihood(ToyDME, MLE(ToyDME))
@test ModelMap(Predictor(ToyDME), PositiveDomain(2)) isa ModelMap
sol = ConfidenceRegion(ToyDME,1; tol=1e-6)
@test ApproxInRegion(sol, MLE(ToyDME)) && !ApproxInRegion(sol, sol.u[1] + 1e-5BasisVector(1,2))
# Check that the bounding box from ProfileLikelihood coincides roughly with the exact box.
Mats = ProfileLikelihood(ToyDME,2; plot=false)
ProfBox = ProfileBox(ToyDME, InterpolatedProfiles(Mats),1)
ExactBox = ConstructCube(ConfidenceRegion(ToyDME,1; tol=1e-6))
@test norm(Center(ProfBox) - Center(ExactBox)) < 3e-5
@test norm(CubeWidths(ProfBox) - CubeWidths(ExactBox)) < 3e-4
@test 0 < PracticallyIdentifiable(Mats) < 2
# Method for general cost functions on 2D domains
sol = GenerateBoundary(x->-norm(x,1.5), [1., 0])
@test 0.23 ≤ length(GenerateBoundary(x->-norm(x,1.5), [1., 0]; Boundaries=(u,t,int)->u[1]<0.).u) / length(sol.u) ≤ 0.27
end
@safetestset "ODE-based models" begin
using InformationGeometry, Test, OrdinaryDiffEq, LinearAlgebra
function SIR!(du,u,p,t)
S, I, R = u
β, γ = p
du[1] = -β * I * S
du[2] = +β * I * S - γ * I
du[3] = +γ * I
nothing
end
SIRsys = ODEFunction(SIR!)
infected = [3, 8, 28, 75, 221, 291, 255, 235, 190, 126, 70, 28, 12, 5]
SIRDS = InformNames(DataSet(collect(1:14), infected, 5ones(14)), ["Days"], ["Infected"])
SIRinitial = X->([763.0-X[1], X[1], 0.0], X[2:3])
# Use SplitterFunction SIRinitial to infer initial condition I₀ as first parameter
SIRDM = DataModel(SIRDS, SIRsys, SIRinitial, x->x[2], [0.6,0.0023,0.46]; tol=1e-6)
@test SIRDM isa DataModel
@test DataModel(SIRDS, SIRsys, [762, 1, 0.], [2], [0.0022,0.45], true; meth=Tsit5(), tol=1e-6) isa DataModel
@test norm(2*EmbeddingMap(Data(SIRDM), Predictor(SIRDM), MLE(SIRDM)) - EmbeddingMap(Data(SIRDM), ModifyODEmodel(SIRDM, x->2*x[2]), MLE(SIRDM))) < 2e-4
end
@safetestset "Model Transformations" begin
using InformationGeometry, Test
PiDM = DataModel(DataSet([0,1], [0.5π,1.5π], [0.5,0.5]), ModelMap((x,p)->p[1], θ->θ[1]-1, HyperCube([[0,5]])))
@test !IsInDomain(Predictor(PiDM), [0.9]) && IsInDomain(Predictor(PiDM), [1.1])
# Translation
PiDM2 = DataModel(Data(PiDM), TranslationTransform(Predictor(PiDM),[1.]))
@test !IsInDomain(Predictor(PiDM2), [-0.1]) && IsInDomain(Predictor(PiDM2), [0.1])
# LogTransform
PiDM3 = DataModel(Data(PiDM), LogTransform(Predictor(PiDM),trues(1)))
@test !IsInDomain(Predictor(PiDM3), exp.([1])-[0.1]) && IsInDomain(Predictor(PiDM3), exp.([1])+[0.1])
DS = DataSet([0,0.5,1,1.5],[1.,3.,7.,8.1],[1.2,2.,0.6,1.])
@test FisherMetric(LinearDecorrelation(DataModel(DS, (x,θ)->θ[1] * x + θ[2])), zeros(2)) ≈ [1 0; 0 1]
# TranstrumModel = ModelMap((x::Real,p::AbstractVector)->exp(-p[1]*x) + exp(-p[2]*x), θ::AbstractVector -> θ[1]>θ[2], PositiveDomain(2, 1e2), (1,1,2))
# TranstrumDM = DataModel(DataSet([0.33, 1, 3], [0.88,0.5,0.35], [0.1,0.3,0.2]), TranstrumModel)
# linTranstrum = LogTransform(TranstrumDM)
# RicciScalar(TranstrumDM, MLE(TranstrumDM)), RicciScalar(linTranstrum, MLE(linTranstrum))
# loglikelihood(TranstrumDM, MLE(TranstrumDM)), loglikelihood(linTranstrum, MLE(linTranstrum))
# Try with normal functions too, not only ModelMaps.
# Try Ricci in particular, maybe as BigFloat.
# Does Score / FisherMetric and AutoDiff still work?
end
@safetestset "In-place ModelMaps" begin
using InformationGeometry, Test, LinearAlgebra
DM = DataModel(DataSet([1,2,3],[4,1,5,2,6.5,3.5],[0.5,0.5,0.45,0.45,0.6,0.6], (3,1,2)), (x,p)-> [p[1]^3*x, p[2]^2*x])
dm = InplaceDM(DM)
@test EmbeddingMap(DM, MLE(DM)) ≈ EmbeddingMap(dm, MLE(dm))
@test EmbeddingMatrix(DM,MLE(DM)) ≈ EmbeddingMatrix(dm,MLE(dm))
@test Score(DM, MLE(DM)) ≈ Score(dm, MLE(dm))
@test FisherMetric(DM, MLE(DM)) ≈ FisherMetric(dm, MLE(dm))
end
@safetestset "Inputting Datasets of various shapes" begin
using InformationGeometry, Test, LinearAlgebra, Random, Distributions, StaticArrays, Plots
ycovtrue = Diagonal([1,2,3]) |> x->convert(Matrix,x)
ptrue = [1.,π,-5.]; ErrorDistTrue = MvNormal(zeros(3),ycovtrue)
model(x::AbstractVector{<:Number},p::AbstractVector{<:Number}) = SA[p[1] * x[1]^2 + p[3]^3 * x[2],
sinh(p[2]) * (x[1] + x[2]), exp(p[1]*x[1] + p[1]*x[2])]
Gen(t) = float.([t,0.5t^2]); Xdata = Gen.(0.5:0.1:3)
Ydata = [model(x,ptrue) + rand(ErrorDistTrue) for x in Xdata]
Sig = BlockMatrix(ycovtrue,length(Ydata)); DS = DataSet(Xdata,Ydata,Sig)
DM = DataModel(DS,model)
@test norm(MLE(DM) - ptrue) < 5e-2
DME = DataModel(DataSetExact(DS), model)
P = MLE(DM) + 0.5rand(length(MLE(DM)))
@test loglikelihood(DM,P) ≈ loglikelihood(DME,P)
@test Score(DM,P) ≈ Score(DME,P)
Planes, sols = ConfidenceRegion(DM,1)
@test typeof(VisualizeSols(Planes,sols)) <: Plots.Plot
ODM = OptimizedDM(DME)
@test norm(EmbeddingMatrix(DME,MLE(DME)) .- EmbeddingMatrix(ODM,MLE(DME)), 1) < 1e-9
CDM = DataModel(CompositeDataSet(Data(ODM)), Predictor(ODM), dPredictor(ODM), MLE(ODM))
@test abs(loglikelihood(ODM, P) - loglikelihood(CDM, P)) < 5e-6
@test norm(Score(ODM, P) - Score(CDM, P)) < 2e-6
@test norm(FisherMetric(ODM, P) - FisherMetric(CDM, P)) < 2e-6
@test norm(InformationGeometry.ResidualStandardError(ODM) - InformationGeometry.ResidualStandardError(CDM)) < 1e-10
lastDS = Data(Data(CDM))[3]
newCDS = vcat(Data(Data(CDM))[1:end-1], [SubDataSet(lastDS, 1:2:Npoints(lastDS))], [SubDataSet(lastDS, 2:2:Npoints(lastDS))]) |> CompositeDataSet
# repeat last component
newmodel(x::AbstractVector{<:Number},p::AbstractVector{<:Number}) = SA[p[1] * x[1]^2 + p[3]^3 * x[2], sinh(p[2]) * (x[1] + x[2]),
exp(p[1]*x[1] + p[1]*x[2]), exp(p[1]*x[1] + p[1]*x[2])]
splitCDM = DataModel(newCDS, newmodel, MLE(CDM))
@test abs(loglikelihood(splitCDM, P) - loglikelihood(CDM, P)) < 1e-5
@test norm(Score(splitCDM, P) - Score(CDM, P)) < 2e-4
@test norm(FisherMetric(splitCDM, P) - FisherMetric(CDM, P)) < 2e-3
end
@safetestset "Priors" begin
using InformationGeometry, Test, LinearAlgebra, Distributions
DS1 = DataSet([0,0.5],[1.,3.],[1.2,2.]); DS2 = DataSet([1,1.5],[7.,8.1],[0.6,1.])
DS = join(DS1, DS2); model(x,θ) = θ[1] * x + θ[2];
DM1 = DataModel(DS1,model); DM = DataModel(DS,model);
logprior(X) = loglikelihood(DM1, X)
DM12 = DataModel(DS2, model, MLE(DM1), logprior)
@test norm(MLE(DM) - MLE(DM12)) < 1e-6
@test loglikelihood(DM, MLE(DM)) ≈ loglikelihood(DM12, MLE(DM))
@test norm(Score(DM, MLE(DM)) - Score(DM12, MLE(DM))) < 1e-12
@test FisherMetric(DM, MLE(DM)) ≈ FisherMetric(DM12, MLE(DM))
# Intentionally not giving information about LogPrior input here
dm = DataModel(DS, model, MLE(DM), x->0.0)
@test loglikelihood(DM, MLE(DM)) ≈ loglikelihood(dm, MLE(DM))
@test Score(DM, MLE(DM)) ≈ Score(dm, MLE(DM))
@test FisherMetric(DM, MLE(DM)) ≈ FisherMetric(dm, MLE(DM))
end
@safetestset "Kullback-Leibler Divergences" begin
using InformationGeometry, Test, LinearAlgebra, Distributions
# Analytical divergences via types defined in Distributions.jl
@test KullbackLeibler(MvNormal([1,2,3.],diagm([2,4,5.])),MvNormal([1,-2,-3.],diagm([1,5,2.]))) ≈ 11.056852819440055
@test KullbackLeibler(Normal(1,2),Normal(5,1)) ≈ 8.806852819440055
@test KullbackLeibler(Cauchy(1,3),Cauchy(5,2)) ≈ 0.5355182363563621
@test KullbackLeibler(Exponential(5),Exponential(1)) ≈ 2.3905620875658995
@test KullbackLeibler(Weibull(12,2),Weibull(3,4)) ≈ 2.146124463755512
@test KullbackLeibler(Gamma(5,15),Gamma(20,1)) ≈ 29.409061308330323
# Numerically calculated via arbitrary types defined in Distributions.jl
# ALSO ADD TESTS FOR DISCRETE DISTRIBUTIONS, DISTRIBUTIONS WITH LIMITED DOMAIN
@test abs(KullbackLeibler(Cauchy(1,2),Normal(5,1),HyperCube([-20,20]); tol=1e-8) - 16.77645704773449) < 1e-5
@test abs(KullbackLeibler(Cauchy(1,2),Normal(5,1),HyperCube([-20,20]); Carlo=true, N=Int(3e6)) - 16.7764) < 5e-2
@test abs(KullbackLeibler(MvTDist(1,[3,2,1.],diagm([1.,2.,3.])),MvNormal([1,2,3.],diagm([2,4,5.])),HyperCube([[-10,10.] for i in 1:3]); Carlo=true, N=Int(3e6)) - 1.6559288) < 3e-1
# Product distributions, particularly Normal and Cauchy
P = [Normal(0,1), Cauchy(1,2)] |> product_distribution
Q = [Cauchy(1,1), Cauchy(2,4)] |> product_distribution
R = [Normal(2,4), Normal(-1,0.5)] |> product_distribution
@test abs(KullbackLeibler(P, Q, HyperCube([[-20,20] for i in 1:2]); tol=1e-7) - 0.719771180) < 1e-8
@test abs(KullbackLeibler(R, P, HyperCube([[-20,20] for i in 1:2]); tol=1e-7) - 9.920379769) < 1e-8
@test abs(KullbackLeibler(P, R, HyperCube([[-20,20] for i in 1:2]); tol=1e-7) - 48.99179438) < 1e-8
# Via any positive (hopefully normalized) functions
@test abs(KullbackLeibler(x->pdf(Normal(1,3),x),y->pdf(Normal(5,2),y),HyperCube([-20,20]); Carlo=true, N=Int(3e6)) - KullbackLeibler(Normal(1,3),Normal(5,2))) < 2e-2
@test abs(KullbackLeibler(x->pdf(Normal(1,3),x),y->pdf(Normal(5,2),y),HyperCube([-20,20]); tol=1e-8) - KullbackLeibler(Normal(1,3),Normal(5,2))) < 1e-5
P = MvNormal([1,2,3.],diagm([1,2,1.5])); Q = MvNormal([1,-2,-3.],diagm([2,1.5,1.])); Cube = HyperCube([[-15,15] for i in 1:3])
@test abs(KullbackLeibler(x->pdf(P,x),y->pdf(Q,y),Cube; Carlo=true, N=Int(3e6)) - KullbackLeibler(x->pdf(P,x),y->pdf(Q,y),Cube; tol=1e-8)) < 0.8
end
@safetestset "Differential Geometry" begin
using InformationGeometry, Test, LinearAlgebra, StaticArrays
S2metric((θ,ϕ)) = [1.0 0; 0 sin(θ)^2]
function S2Christoffel((θ,ϕ))
Symbol = zeros(suff(ϕ),2,2,2); Symbol[1,2,2] = -sin(θ)*cos(θ)
Symbol[2,1,2] = Symbol[2,2,1] = cos(θ)/sin(θ); Symbol
end
# Calculation by hand works out such that in this special case:
S2Ricci(x) = S2metric(x)
ConstMetric(x) = Diagonal(ones(2))
# Test Numeric Christoffel Symbols, Riemann and Ricci tensors, Ricci Scalar
# Test WITH AND WITHOUT BIGFLOAT
x = rand(2)
@test norm(ChristoffelSymbol(S2metric,x) .- S2Christoffel(x), 1) < 5e-9
@test norm(ChristoffelSymbol(S2metric,BigFloat.(x)) .- S2Christoffel(BigFloat.(x)), 1) < 1e-40
@test abs(RicciScalar(S2metric,x) - 2) < 5e-4
@test abs(RicciScalar(S2metric,BigFloat.(x)) - 2) < 2e-22
# Use wilder metric and test AutoDiff vs Finite
import InformationGeometry: MetricPartials, ChristoffelPartials
Y = rand(3)
Metric3(x) = [sinh(x[3]) exp(x[1])*sin(x[2]) 0; 0 cosh(x[2]) cos(x[2])*x[3]*x[2]; exp(x[2]) cos(x[3])*x[1]*x[2] 0.]
@test MetricPartials(Metric3, Y; ADmode=Val(true)) ≈ MetricPartials(Metric3, Y; ADmode=Val(false))
@test ChristoffelSymbol(Metric3, Y; ADmode=Val(true)) ≈ ChristoffelSymbol(Metric3, Y; ADmode=Val(false))
@test maximum(abs.(ChristoffelPartials(Metric3, Y; ADmode=Val(true)) - ChristoffelPartials(Metric3, Y; ADmode=Val(false), BigCalc=true))) < 1e-11
@test maximum(abs.(Riemann(Metric3, Y; ADmode=Val(true)) - Riemann(Metric3, Y; ADmode=Val(false), BigCalc=true))) < 1e-11
# Test with static arrays
Metric3SA(x) = SA[sinh(x[3]) exp(x[1])*sin(x[2]) 0; 0 cosh(x[2]) cos(x[2])*x[3]*x[2]; exp(x[2]) cos(x[3])*x[1]*x[2] 0.]
@test MetricPartials(Metric3SA, Y; ADmode=Val(true)) ≈ MetricPartials(Metric3SA, Y; ADmode=Val(false))
@test ChristoffelSymbol(Metric3SA, Y; ADmode=Val(true)) ≈ ChristoffelSymbol(Metric3SA, Y; ADmode=Val(false))
@test maximum(abs.(ChristoffelPartials(Metric3SA, Y; ADmode=Val(true)) - ChristoffelPartials(Metric3SA, Y; ADmode=Val(false), BigCalc=true))) < 1e-11
@test maximum(abs.(Riemann(Metric3SA, Y; ADmode=Val(true)) - Riemann(Metric3SA, Y; ADmode=Val(false), BigCalc=true))) < 1e-11
# Test with BigFloat
@test -45 > MetricPartials(Metric3SA, BigFloat.(Y); ADmode=Val(true)) - MetricPartials(Metric3SA, BigFloat.(Y); ADmode=Val(false)) |> maximum |> log10 |> Float64
@test -45 > ChristoffelSymbol(Metric3SA, BigFloat.(Y); ADmode=Val(true)) - ChristoffelSymbol(Metric3SA, BigFloat.(Y); ADmode=Val(false)) |> maximum |> log10 |> Float64
@test -20 > ChristoffelPartials(Metric3SA, BigFloat.(Y); ADmode=Val(true)) - ChristoffelPartials(Metric3SA, BigFloat.(Y); ADmode=Val(false)) |> maximum |> log10 |> Float64
@test -20 > Riemann(Metric3SA, BigFloat.(Y); ADmode=Val(true)) - Riemann(Metric3SA, BigFloat.(Y); ADmode=Val(false)) |> maximum |> log10 |> Float64
@test abs(GeodesicDistance(ConstMetric,[0,0],[1,1]) - sqrt(2)) < 2e-8
@test abs(GeodesicDistance(S2metric,[π/4,1],[3π/4,1]) - π/2) < 1e-9
@test abs(GeodesicDistance(S2metric,[π/2,0],[π/2,π/2]) - π/2) < 1e-8
DS = DataSet([0,0.5,1],[1.,3.,7.],[1.2,2.,0.6]); DM = DataModel(DS, (x,p) -> p[1]^3 *x + p[2]^3)
y = MLE(DM) + 0.2(rand(2) .- 0.5)
geo = GeodesicBetween(DM, MLE(DM), y; tol=1e-11)
@test norm(MLE(DM) - [1.829289173660125,0.942865200406147]) < 1e-7
Len = GeodesicLength(DM,geo)
@test abs(InformationGeometry.ParamVol(geo) * InformationGeometry.GeodesicEnergy(DM,geo) - Len^2) < 1e-8
Confnum = InvConfVol(ChisqCDF(pdim(DM), 2*(LogLikeMLE(DM) - loglikelihood(DM, y))))
@test InformationGeometry.GeodesicRadius(DM, Confnum) - Len < 1e-5
# Apply logarithmic map first since it is typically multi-valued for positively curved manifolds.
@test norm(ExponentialMap(FisherMetric(DM), MLE(DM), LogarithmicMap(FisherMetric(DM), MLE(DM), y)) - y) < 1
end
@safetestset "Numerical Helper Functions" begin
using InformationGeometry, Test, BenchmarkTools, ForwardDiff
# Compare Integrate1D and IntegrateND
# Test integration, differentiation, Monte Carlo, GeodesicLength
# TEST WITH AND WITHOUT BIGFLOAT
@test abs(InformationGeometry.MonteCarloArea(x->((x[1]^2 + x[2]^2) < 1), HyperCube([[-1,1],[-1,1]])) - π) < 1.5e-3
@test abs(Integrate1D(cos, (0,π/2); tol=1e-12) - IntegrateND(cos, (0,π/2); tol=1e-12)) < 1e-10
z = 3rand()
@test abs(Integrate1D(x->2/sqrt(π) * exp(-x^2), [0,z/sqrt(2)]) - ConfVol(z)) < 1e-12
@test abs(LineSearch(x->(x < BigFloat(π))) - π) < 1e-14
@test abs(LineSearch(x->(x < BigFloat(π)), BigFloat(1e-14); tol=1e-30) - BigFloat(π)) < 1e-25
@test abs(CubeVol(TranslateCube(HyperCube([[0,1],[0,π],[-sqrt(2),0]]),rand(3))) - sqrt(2)*π) < 3e-15
k = rand(1:20); r = 10rand()
@test InvChisqCDF(k,Float64(ChisqCDF(k,r))) ≈ r
@test abs(InvChisqCDF(k,ChisqCDF(k,BigFloat(r)); tol=1e-20) - r) < 1e-18
end
|
proofpile-julia0005-42498 | {
"provenance": "014.jsonl.gz:242499"
} | #!/usr/bin/env julia
using Printf
using Distributed
using TimerOutputs
function ζ(N,a)
s = 0.0
e = 1.0*a
VLEN = 512 #vector length
__P_SUM = zeros(Float64,VLEN)
__DEC = zeros(Float64,VLEN)
__D_POW = zeros(Float64,VLEN)
__L = zeros(Float64,VLEN)
start_index = N - (N % VLEN);
end_index = VLEN;
#@printf("start = %i, end = %i\n",start_index,end_index)
#@printf("N = %i\n",N)
for k in 1:VLEN
__DEC[k] = convert(Float64,VLEN)
__L[k] = convert(Float64,start_index - (k - 1));
end
#@printf("(pre warmup) s=%f\n",s)
# warm up loop to handle non-vector size clean up before main loop
if N == start_index
s = 0.0
else
for i ∈ N:-1:start_index+1
s += (1.0/i)^e
end
end
#@printf("(post warmup) s=%f\n",s)
# main loop
for k in start_index:-VLEN:end_index
__D_POW = (1.0 ./ __L).^e
__P_SUM += __D_POW
__L -= __DEC
end
# clean up loop to handle post vector loop calc
#for i in end_index:-1:1
# s += (1.0/i)^e
#end
# sum reduction
for i in 1:VLEN
s+=__P_SUM[i]
end
s
end
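# ζ(N, a) accumulates the truncated Riemann zeta sum Σ_{k=1}^N k^(-a) in
# VLEN-wide strided partial sums. Hedged spot check: ζ(1_000_000, 2) should lie
# within about 1e-6 of pi^2/6, since the truncation error is roughly 1/N.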
to = TimerOutput()
N = 16000000000
st = 1
s = zeros(Float64,32)
pd = zeros(Float64,32)
ndx = 1
#ENV["JULIA_DEBUG"] = "CUDAnative"
s[1] = ζ(N,2)
for i=1:1
@printf("s[%i] = %f\n",i,s[i])
end
@timeit to "ζ" pd[ndx] = ζ(N,2)
ndx+=1
for i=1:1
@printf("pd[%i] = %f\n",i,pd[i])
end
show(to)
@printf("\n");
|
proofpile-julia0005-42499 | {
"provenance": "014.jsonl.gz:242500"
} | using BinDeps
using CMakeWrapper
@BinDeps.setup
function cflags_validator(pkg_names...)
return (name, handle) -> begin
for pkg_name in pkg_names
try
run(`pkg-config --cflags $(pkg_name)`)
return true
catch ErrorException
end
end
false
end
end
"""
Director's libraries are unversioned, so there's no possible way
to know if a system-installed version of Director is compatible
with this interface. Instead, we have to restrict ourselves to
only versions of Director which are built locally.
"""
function is_local_build(name, handle)
startswith(relpath(name, @__DIR__), "usr/")
end
basedir = dirname(@__FILE__)
director_version = "0.1.0-234-g74cea84"
director_sha = "02c2ef65f8d1d9f3de1d56d129351cd43846d70b"
@static if Sys.islinux()
python = library_dependency("python", aliases=["libpython2.7.so",], validate=cflags_validator("python", "python2"))
qt4 = library_dependency("QtCore", aliases=["libQtCore.so", "libQtCore.so.4.8"], depends=[python])
qt4_opengl = library_dependency("QtOpenGL", aliases=["libQtOpenGL.so", "libQtOpenGL.so.4.8"], depends=[qt4])
director = library_dependency("ddApp", aliases=["libddApp"], depends=[python, qt4, qt4_opengl], validate=is_local_build)
linux_distributor = strip(read(`lsb_release -i -s`, String))
linux_version = try
VersionNumber(strip(read(`lsb_release -r -s`, String)))
catch e
if isa(e, ArgumentError)
v"0.0.0"
else
rethrow(e)
end
end
provides(AptGet, Dict("libqt4-dev"=>qt4, "libqt4-opengl-dev"=>qt4_opengl, "python-dev"=>python))
force_source_build = lowercase(get(ENV, "DIRECTOR_BUILD_FROM_SOURCE", "")) in ["true", "1"]
director_binary = nothing
if !force_source_build
if linux_distributor == "Ubuntu"
if linux_version >= v"16.04"
director_binary = "ubuntu-16.04"
elseif linux_version >= v"14.04"
director_binary = "ubuntu-14.04"
end
elseif linux_distributor == "Debian"
if linux_version >= v"8.7"
director_binary = "ubuntu-14.04"
end
end
end
if director_binary !== nothing
provides(BuildProcess, (@build_steps begin
FileDownloader("https://dl.bintray.com/patmarion/director/director-$(director_version)-$(director_binary).tar.gz",
joinpath(basedir, "downloads", "director.tar.gz"))
CreateDirectory(joinpath(basedir, "usr"))
(`tar xzf $(joinpath(basedir, "downloads", "director.tar.gz")) --directory=usr --strip-components=1`)
end), director)
else
provides(Sources,
URI("https://github.com/RobotLocomotion/director/archive/$(director_sha).zip"),
director,
unpacked_dir="director-$(director_sha)")
provides(CMakeProcess(srcdir=joinpath(basedir, "src",
"director-$(director_sha)", "distro", "superbuild"),
cmake_args=["-DUSE_LCM=ON",
"-DUSE_EXTERNAL_INSTALL:BOOL=ON"],
targetname=""),
director)
end
elseif Sys.isapple()
# Use the libvtkDRCFilters library instead of libddApp
# to work around weird segfault when dlclose()-ing libddApp
director = library_dependency("vtkDRCFilters", aliases=["libvtkDRCFilters.dylib"], validate=is_local_build)
provides(BuildProcess, (@build_steps begin
FileDownloader("https://dl.bintray.com/patmarion/director/director-$(director_version)-mac.tar.gz",
joinpath(basedir, "downloads", "director.tar.gz"))
CreateDirectory(joinpath(basedir, "usr"))
(`tar xzf $(joinpath(basedir, "downloads", "director.tar.gz")) --directory=usr --strip-components=1`)
end), director)
end
@BinDeps.install Dict(:ddApp => :libddApp)
|
proofpile-julia0005-42500 | {
"provenance": "014.jsonl.gz:242501"
} | @doc raw"""
cronbach(Σ::AbstractMatrix)
Calculate [Cronbach's alpha](https://en.wikipedia.org/wiki/Cronbach%27s_alpha).
This is also known as tau-equivalent reliability (``\rho_T``).
`Σ` is a covariance matrix (which means it must be square).
# Examples
```jldoctest
julia> C = [10 6 6 6; # fictitious data
6 11 6 6;
6 6 12 6;
6 6 6 13];
julia> cronbach(C)
0.8135593220338984
```
"""
function cronbach(Σ::AbstractMatrix)
k, m = size(Σ)
k == m || throw(DimensionMismatch("dimensions must match"))
σᵢⱼ = zero(eltype(Σ))
@inbounds for i in 2:k, j in 1:i-1
σᵢⱼ += Σ[i, j]
end
σₓ² = 2σᵢⱼ + sum(diag(Σ))
σᵢⱼ /= (k - 1) * k / 2
return k^2 * σᵢⱼ / σₓ²
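# Hedged cross-check against the closed form α = k² σ̄ᵢⱼ / σₓ² for the doctest
# matrix above: k = 4, the off-diagonal covariances sum to 36 (mean 6),
# σₓ² = 2*36 + 46 = 118, so α = 16 * 6 / 118 ≈ 0.8135593220338984.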
end |
proofpile-julia0005-42501 | {
"provenance": "014.jsonl.gz:242502"
} | nodes(g::Graph) = keys(fadj(g))
out_neighbor(g::Graph, u) = keys(fadj(g)[u])
in_neighbor(g::Graph, u) = keys(badj(g)[u])
has_node(g::Graph, u) = u in nodes(g) |
proofpile-julia0005-42502 | {
"provenance": "014.jsonl.gz:242503"
} | #!/usr/bin/env julia
using Test
# write your own tests here
@test 1 == 1
using CORBITS
function test_corbits()
#@compat is_windows() ? error("# CORBITS won't work on windows") : nothing
a = Cdouble[0.05, 0.15]
r_star = convert(Cdouble,0.005)
r = Cdouble[0.0001,0.0001]
ecc = Cdouble[0.02, 0.1]
Omega = Cdouble[0.0, 0.0]
omega = Cdouble[ 0.0, 0.5]
inc = Cdouble[pi/2, pi/2]
use_pl = Cint[1,1]
prob_of_transits_approx(a, r_star, r, ecc, Omega, omega, inc, use_pl)
end
@test isapprox(test_corbits(), 0.03367003367003367, atol=0.0001)
|
proofpile-julia0005-42503 | {
"provenance": "014.jsonl.gz:242504"
} | using Test
import ParallelStencil
using ParallelStencil.ParallelKernel
import ParallelStencil.ParallelKernel: @reset_parallel_kernel, @is_initialized, @get_package, @get_numbertype, SUPPORTED_PACKAGES, PKG_CUDA, PKG_NONE, NUMBERTYPE_NONE
import ParallelStencil.ParallelKernel: @require, @symbols, longnameof
TEST_PACKAGES = SUPPORTED_PACKAGES
@static if PKG_CUDA in TEST_PACKAGES
import CUDA
if !CUDA.functional() TEST_PACKAGES = filter!(x->x≠PKG_CUDA, TEST_PACKAGES) end
end
@static for package in TEST_PACKAGES eval(:(
@testset "$(basename(@__FILE__)) (package: $(nameof($package)))" begin
@testset "1. Reset of ParallelKernel" begin
@testset "Reset if not initialized" begin
@require !@is_initialized()
@reset_parallel_kernel()
@test !@is_initialized()
@test @get_package() == $PKG_NONE
@test @get_numbertype() == $NUMBERTYPE_NONE
end;
@testset "Reset if initialized" begin
@require !@is_initialized()
@init_parallel_kernel($package, Float64)
@require @is_initialized() && @get_package() == $package
@reset_parallel_kernel()
@test length(@symbols($(@__MODULE__), Data)) == 1
@test !@is_initialized()
@test @get_package() == $PKG_NONE
@test @get_numbertype() == $NUMBERTYPE_NONE
end;
end;
end;
)) end == nothing || true;
|
proofpile-julia0005-42504 | {
"provenance": "014.jsonl.gz:242505"
} | # Starting point for conditional example for Learning Julia
# Create an if-else conditional
x = 17
if x < 10
println("x is small")
else
println("x is big")
end
# multiple conditions can be specified with if-elseif-else
if x < 10
println("x is small")
elseif x >= 10 && x < 25
println("x is medium")
else
println("x is big")
end
# The ternary operator can condense a comparison
println(x < 10 ? "x is less than 10" : "x is 10 or greater")
# ternary operator condense if-else conditional statement
# if the condition is true, execute the portion between ? and :
# if the condition is false, execute the portion after :
|
proofpile-julia0005-42505 | {
"provenance": "014.jsonl.gz:242506"
} | import ClausenFunctions
n = 10_000_000
x_min = 0.0
x_max = pi
data = (x_max - x_min)*rand(Float64, n) + x_min*ones(n)
println("Benchmarking cl1::Float64")
time_cl1(data) = @time map(ClausenFunctions.cl1, data)
map(ClausenFunctions.cl1, data) # trigger compilation
time_cl1(data) # trigger compilation
time_cl1(data)
time_cl1(data)
println("Benchmarking cl2::Float64")
time_cl2(data) = @time map(ClausenFunctions.cl2, data)
map(ClausenFunctions.cl2, data) # trigger compilation
time_cl2(data) # trigger compilation
time_cl2(data)
time_cl2(data)
println("Benchmarking cl3::Float64")
time_cl3(data) = @time map(ClausenFunctions.cl3, data)
map(ClausenFunctions.cl3, data) # trigger compilation
time_cl3(data) # trigger compilation
time_cl3(data)
time_cl3(data)
println("Benchmarking cl4::Float64")
time_cl4(data) = @time map(ClausenFunctions.cl4, data)
map(ClausenFunctions.cl4, data) # trigger compilation
time_cl4(data) # trigger compilation
time_cl4(data)
time_cl4(data)
println("Benchmarking cl5::Float64")
time_cl5(data) = @time map(ClausenFunctions.cl5, data)
map(ClausenFunctions.cl5, data) # trigger compilation
time_cl5(data) # trigger compilation
time_cl5(data)
time_cl5(data)
println("Benchmarking cl6::Float64")
time_cl6(data) = @time map(ClausenFunctions.cl6, data)
map(ClausenFunctions.cl6, data) # trigger compilation
time_cl6(data) # trigger compilation
time_cl6(data)
time_cl6(data)
println("Benchmarking cl::Float64")
time_cl(k, data) = @time map(x -> ClausenFunctions.cl(k, x), data)
n = 1_000_000
data = (x_max - x_min)*rand(Float64, n) + x_min*ones(n)
for k in vcat(collect(1:16), [1000, 1001, 1_000_000])
println("Benchmarking cl($(k),x)::Float64")
map(x -> ClausenFunctions.cl(k, x), data) # trigger compilation
time_cl(k, data) # trigger compilation
time_cl(k, data)
time_cl(k, data)
end
println("Benchmarking sl::Float64")
time_sl(k, data) = @time map(x -> ClausenFunctions.sl(k, x), data)
n = 1_000_000
data = (x_max - x_min)*rand(Float64, n) + x_min*ones(n)
for k in vcat(collect(1:31), [1000, 1001, 1_000_000])
println("Benchmarking sl($(k),x)::Float64")
map(x -> ClausenFunctions.sl(k, x), data) # trigger compilation
time_sl(k, data) # trigger compilation
time_sl(k, data)
time_sl(k, data)
end
|
proofpile-julia0005-42506 | {
"provenance": "014.jsonl.gz:242507"
} | module ASTInterface
__precompile__(false)
using Distributions
using CommonRLInterface
import Base: rand
export ASTMDP,
Simulation,
Environment,
EnvironmentValue,
reset!,
environment,
observe,
step!,
isterminal,
isevent,
distance,
flatten,
unflatten
include("AST.jl")
include("interface.jl")
include("RL.jl")
end |
proofpile-julia0005-42507 | {
"provenance": "014.jsonl.gz:242508"
} | """
$SIGNATURES
Resolve arguments for `LxObj` by interpolating `#k` appropriately.
"""
function resolve_args(base::AS, braces::Vector{OCBlock})
res = base
for (i, brace) in enumerate(braces)
brace_content = stent(brace)
# space-sensitive 'unsafe' one
res = replace(res, "!#$i" => brace_content)
# space-insensitive 'safe' one (e.g. for things like `\mathbb#1`)
res = replace(res, "#$i" => " " * brace_content)
end
return res
end
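# Hedged illustration of the two placeholder forms above: with one brace holding
# "world", resolve_args("Hi !#1/#1", braces) gives "Hi world/ world", since
# `!#k` substitutes in place while `#k` prepends a space before the content.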
"""
$(SIGNATURES)
Take a `<: LxObj` and try to resolve it by looking up the appropriate definition, applying it then
reparsing the result.
"""
function resolve_lxobj(lxo::LxObj, lxdefs::Vector{LxDef};
inmath::Bool=false)::String
# retrieve the definition the environment points to
lxd = getdef(lxo)
env = lxo isa LxEnv
# in case it's defined in Utils or in Franklin
name = getname(lxo)
fun = Symbol(ifelse(env, "env_", "lx_") * name)
# it will be `nothing` in math mode or when defined in utils
if isnothing(lxd)
# check if it's defined in Utils and act accordingly
if isdefined(Main, utils_symb()) && isdefined(utils_module(), fun)
raw = Core.eval(utils_module(), :($fun($lxo, $lxdefs)))
return reprocess(raw, lxdefs)
else
# let the math backend deal with the string
return lxo.ss
end
end
# the definition can be empty (which can be on purpose, for internal defs)
if (!env && isempty(lxd)) || (env && isempty(lxd.first) && isempty(lxd.second))
name = getname(lxo)
isdefined(Franklin, fun) && return eval(:($fun($lxo, $lxdefs)))
return ""
end
# non-empty cases
if !env
partial = resolve_args(lxd, lxo.braces)
else
partial = resolve_args(lxd.first, lxo.braces)
partial *= content(lxo)
partial *= resolve_args(lxd.second, lxo.braces)
end
# if 'inmath' surround accordingly so that this information is preserved
inmath && (partial = mathenv(partial))
return reprocess(partial, lxdefs)
end
"""
$SIGNATURES
Convenience function to take a markdown string (e.g. produced by a latex command) and re-parse it.
"""
function reprocess(s::AS, lxdefs::Vector{<:LxDef}; nostripp=false)
r = convert_md(s, lxdefs;
isrecursive=true, isconfig=false, has_mddefs=false,
nostripp=nostripp)
return r
end
|
proofpile-julia0005-42508 | {
"provenance": "014.jsonl.gz:242509"
} | export PermList
export randpermlist, leastmoved, greatestmoved, supportsize, support, fixed
export pow2
export pivtopermlist
import DataStructures: list
struct PermList{T<:Real} <: AbstractPerm{T}
data::Vector{T}
end
setindex!(p::PermList, i::Int, k::Integer) = p.data[k] = i
getindex(p::PermList{T}, k::Real) where T = k > length(p.data) ? convert(T,k) : (p.data)[k]
length(p::PermList) = length(p.data)
randcyclelist(n::Integer) = PermList(randcycle(n))
pivtopermlist(piv::AbstractArray{T}) where {T<:Real} = PermList(PermPlain.ipiv2perm(piv))
pivtopermlist(piv::AbstractArray{T}, n) where {T<:Real} = PermPlain.ipiv2perm(piv,n)
## Apply permutation, and permutation operations ##
getindex(v::Array{T,1}, p::PermList{Bool}) where T = error("Silence compiler. You don't want Bool, anyway")
getindex(v::Array, p::PermList) = v[p.data] # How to define this for everything?
getindex(v::String, p::PermList) = v[p.data] # How to define this for everything?
*(p::PermList, k::Real) = k > length(p) ? k : p[k]
*(p::PermList, a::AbstractVector{T}) where T = PermPlain.permapply(p.data,a)
# updating ops are "syntactic-operators". Don't know how to define method for them
# *=(p::PermList, q::PermList) = (permcompose!(p.data,q.data), p)
/(p::PermList, q::PermList) = PermList(PermPlain.permcompose(p.data,invperm(q.data)))
# preimage of k under p
/(k::Int, p::PermList) = PermPlain.preimage(p.data,k)
## Output ##
show(io::IO, p::PermList) = print(io,p)
show(p::PermList) = print(p)
# This is needed to avoid trying to print PermList with showarray and failing in 1000 ways
Base.show(io::IO, ::MIME"text/plain", p::PermList) = print(io, p)
|
proofpile-julia0005-42509 | {
"provenance": "014.jsonl.gz:242510"
} | """
OrbitInfo
Struct containing orbit, start time, and observer location info.
Fields:
- `start_time`: start time in UTC in the format
(year, month, day, hour, minute, second)
- `start_time_julian_date`: start time in Julian days
- `site_loc_lla`: site latitude, longitude, and height in (deg, deg, meters)
- `site_loc_ecef`: site ECEF position in (meters, meters, meters)
- `tle_file_name`: file name of satellite TLE file
- `tle`: `TLE` struct (data loaded from TLE file)
- `orb`: single or vector of `OrbitPropagator` structs
- `eop`: Earth Orientation Parameters object
"""
mutable struct OrbitInfo{T1,T2,T3,T4,T5,T6,T7,T8}
start_time::T1
start_time_julian_date::T2
site_loc_lla::T3
site_loc_ecef::T4
tle_file_name::T5
tle::T6
orb::T7
eop::T8
end
"""
initorbitinfo(source_tle::String, target_tle::String, start_time,
site_loc_lla)
Initialize the struct OrbitInfo for multiple satellites. Provide the file names
of the individual TLE files.
Required Arguments:
- `source_tle::String`: TLE string for satellite that is trasmitting signal
- `target_tle::String`: TLE string for satellite that reflectes transmitted
signal from source satellite
- `start_time`: start time in UTC in the format
(year, month, day, hour, minute, second)
- `site_loc_lla`: site latitude, longitude, and height in (deg, deg, meters)
Returns:
- `OrbitInfo` struct
"""
function initorbitinfo(source_tle::String, target_tle::String, start_time,
site_loc_lla)
start_time_julian_date = DatetoJD(start_time...)
lat = deg2rad(site_loc_lla[1])
lon = deg2rad(site_loc_lla[2])
height = site_loc_lla[3]
site_loc_ecef = GeodetictoECEF(lat, lon, height)
tle = [read_tle(source_tle)[1], read_tle(target_tle)[1]]
orb = [init_orbit_propagator(Val(:sgp4), tle[1], sgp4_gc_wgs84),
init_orbit_propagator(Val(:sgp4), tle[2], sgp4_gc_wgs84)]
eop = get_iers_eop(:IAU1980)
tle_names = [source_tle, target_tle]
return OrbitInfo(start_time, start_time_julian_date,
site_loc_lla, site_loc_ecef,
tle_names, tle, orb, eop)
end
"""
initorbitinfo(source_tle::TLE, target_tle::TLE, start_time,
site_loc_lla)
Initialize the struct OrbitInfo for multiple satellites. Provide the already
loaded TLE files as `SatelliteToolbox` `TLE` structs.
Required Arguments:
- `source_tle::TLE`: TLE struct for satellite that is trasmitting signal
- `target_tle::TLE`: TLE struct for satellite that reflectes transmitted
signal from source satellite
- `start_time`: start time in UTC in the format
(year, month, day, hour, minute, second)
- `site_loc_lla`: site latitude, longitude, and height in (deg, deg, meters)
Returns:
- `OrbitInfo` struct
"""
function initorbitinfo(source_tle::TLE, target_tle::TLE, start_time,
site_loc_lla)
start_time_julian_date = DatetoJD(start_time...)
lat = deg2rad(site_loc_lla[1])
lon = deg2rad(site_loc_lla[2])
height = site_loc_lla[3]
site_loc_ecef = GeodetictoECEF(lat, lon, height)
tle = [source_tle, target_tle]
orb = [init_orbit_propagator(Val(:sgp4), tle[1], sgp4_gc_wgs84),
init_orbit_propagator(Val(:sgp4), tle[2], sgp4_gc_wgs84)]
eop = get_iers_eop(:IAU1980)
tle_names = [source_tle.name, target_tle.name]
return OrbitInfo(start_time, start_time_julian_date,
site_loc_lla, site_loc_ecef,
tle_names, tle, orb, eop)
end
"""
initorbitinfo(source_tle::String, start_time, site_loc_lla)
Initialize the struct OrbitInfo for single satellite. Provide the file name of
the individual TLE file.
Required Arguments:
- `source_tle::String`: TLE string for satellite that is transmitting signal
- `start_time`: start time in UTC in the format
(year, month, day, hour, minute, second)
- `site_loc_lla`: site latitude, longitude, and height in (deg, deg, meters)
Returns:
- `OrbitInfo` struct
"""
function initorbitinfo(source_tle::String, start_time, site_loc_lla)
start_time_julian_date = DatetoJD(start_time...)
lat = deg2rad(site_loc_lla[1])
lon = deg2rad(site_loc_lla[2])
height = site_loc_lla[3]
site_loc_ecef = GeodetictoECEF(lat, lon, height)
tle = read_tle(source_tle)[1]
orb = init_orbit_propagator(Val(:sgp4), tle, sgp4_gc_wgs84)
eop = get_iers_eop(:IAU1980)
return OrbitInfo(start_time, start_time_julian_date,
site_loc_lla, site_loc_ecef,
source_tle, tle, orb, eop)
end
"""
initorbitinfo(source_tle::TLE, start_time, site_loc_lla)
Initialize the struct OrbitInfo for single satellite. Provide the already loaded
TLE file as a `SatelliteToolbox` `TLE` struct.
Required Arguments:
- `source_tle::TLE`: TLE struct for satellite that is transmitting signal
- `start_time`: start time in UTC in the format
(year, month, day, hour, minute, second)
- `site_loc_lla`: site latitude, longitude, and height in (deg, deg, meters)
Returns:
- `OrbitInfo` struct
"""
function initorbitinfo(source_tle::TLE, start_time, site_loc_lla)
start_time_julian_date = DatetoJD(start_time...)
lat = deg2rad(site_loc_lla[1])
lon = deg2rad(site_loc_lla[2])
height = site_loc_lla[3]
site_loc_ecef = GeodetictoECEF(lat, lon, height)
tle = source_tle
orb = init_orbit_propagator(Val(:sgp4), tle, sgp4_gc_wgs84)
eop = get_iers_eop(:IAU1980)
return OrbitInfo(start_time, start_time_julian_date,
site_loc_lla, site_loc_ecef,
source_tle.name, tle, orb, eop)
end
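# Minimal usage sketch for the single-satellite case. The TLE file name, start
# time, and site location below are hypothetical placeholders, not values from
# this package:
#
#   start_time = (2020, 1, 1, 12, 0, 0.0)
#   site_loc_lla = (40.0, -105.0, 1600.0)  # (deg, deg, meters)
#   orbit_info = initorbitinfo("satellite.tle", start_time, site_loc_lla)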
"""
SatelliteRAE
Holds information on a given satellite range, azimuth, and elevation from a
observer position.
Fields:
- `name`: name of satellite
- `sat_tle`: `Satellite` or `TLE` struct
- `julian_date_range`: two element vector containing start and end times
in Julian days
- `obs_lla`: observer latitude, longitude, and height
- `obs_ecef`: observer ECEF coordinates
- `Δt`: step size of data
- `ts`: vector of time corresponding to data
- `sat_range`: distance between satellite and observer
- `sat_azimuth`: vector of satellite azimuths
- `sat_elevation`: vector of satellite elevations
- `sat_ecef`: vector of satellite ECEF coordinates
"""
struct SatelliteRAE{A1,A2,A3,A4,A5,A6,A7,A8,A9,A10,A11}
name::A1
sat_tle::A2
julian_date_range::A3
obs_lla::A4
obs_ecef::A5
Δt::A6
ts::A7
sat_range::A8
sat_azimuth::A9
sat_elevation::A10
sat_ecef::A11
end
"""
calcenumatrix(obs_lla)
Calculate the ECEF to ENU transformation matrix using the observer's position in
LLA.
**NOTE:** Latitudes and longitudes are given in degrees and converted to radians internally.
Required Arguments:
- `obs_lla`: observer latitude, longitude, and height in (deg, deg, meters)
Returns:
- ECEF to ENU transformation matrix
"""
function calcenumatrix(obs_lla)
lat = deg2rad(obs_lla[1]) # rad
lon = deg2rad(obs_lla[2]) # rad
h = obs_lla[3] # meters
return [ -sin(lon) cos(lon) 0;
-sin(lat)*cos(lon) -sin(lat)*sin(lon) cos(lat);
cos(lat)*cos(lon) cos(lat)*sin(lon) sin(lat)]
end
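# Sanity-check sketch: the rows of the ENU matrix are orthonormal, so for any
# observer location the matrix times its transpose is the 3×3 identity
# (site coordinates below are illustrative):
#
#   R_ENU = calcenumatrix((40.0, -105.0, 1600.0))
#   R_ENU * R_ENU'  # ≈ 3×3 identity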
"""
calcelevation(sat_tle::TLE, julian_date_range, eop, obs_lla;
name="Satellite", Δt=1/60/60/24)
Calculates the elevation of a given satellite relative to the observer for every
second between the range specified in `julian_date_range`.
Required Arguments:
- `sat_tle::TLE`: satellite TLE struct
- `julian_date_range`: two element vector containing start and end times
in Julian days
- `eop`: Earth Orientation Parameters object
- `obs_lla`: observer latitude, longitude, and height in (deg, deg, meters)
Optional Arguments:
- `name`: name of satellite `(default = "Satellite")`
- `Δt`: step size in Julian days `(default = 1/60/60/24, i.e. one second)`
Returns:
- `SatelliteRAE` struct
"""
function calcelevation(sat_tle::TLE, julian_date_range, eop, obs_lla;
name="Satellite", Δt=1/60/60/24)
obs_ecef = GeodetictoECEF(deg2rad(obs_lla[1]), deg2rad(obs_lla[2]),
obs_lla[3])
sat_orb = init_orbit_propagator(Val(:sgp4), sat_tle)
ts = Array(julian_date_range[1]:Δt:julian_date_range[2])
# Propagate orbit to ts
sat_orb, rs, vs = propagate_to_epoch!(sat_orb, ts)
# Allocate space for storage
sat_ranges = Array{Float64}(undef, length(ts))
azs = Array{Float64}(undef, length(ts))
els = Array{Float64}(undef, length(ts))
sat_ecefs = Array{Float64}(undef, length(ts), 3)
# Calculate ENU transformation matrix
R_ENU = calcenumatrix(obs_lla)
for i in 1:length(ts)
# Convert orbits to state vectors
sat_teme = kepler_to_sv(sat_orb[i])
# Transform TEME to ECEF frame and extract position
sat_ecef = svECItoECEF(sat_teme, TEME(), ITRF(), sat_teme.t, eop).r
# Calculate user-to-sat vector
user_to_sat = sat_ecef - obs_ecef
# Calculate satellite range
sat_range = norm(user_to_sat)
# Normalize `user_to_sat`
user_to_sat_norm = user_to_sat./sat_range
# Transform normalized user-to-sat vector to ENU
enu = R_ENU*user_to_sat_norm # [East, North, Up]
# Calculate satellite azimuth
az = atan(enu[1], enu[2])
# Calculate satellite elevation
el = asin(enu[3]/norm(enu))
# Save values
sat_ranges[i] = sat_range
azs[i] = rad2deg(az)
els[i] = rad2deg(el)
sat_ecefs[i,:] = sat_ecef
end
return SatelliteRAE(name, sat_tle, julian_date_range,
obs_lla, obs_ecef, Δt, ts, sat_ranges,
azs, els, sat_ecefs)
end
"""
calcelevation(satellite::Satellite, obs_lla; name=string(satellite.id))
Calculates the elevation of a given satellite relative to the observer for all
time specified in `satellite.t`.
Required Arguments:
- `satellite::Satellite`: struct containing satellite orbit information
- `obs_lla`: observer latitude, longitude, and height in (deg, deg, meters)
Optional Arguments:
- `name`: name of satellite `(default = string(satellite.id))`
Returns:
- `SatelliteRAE` struct
"""
function calcelevation(satellite::Satellite, obs_lla; name=string(satellite.id))
obs_ecef = GeodetictoECEF(deg2rad(obs_lla[1]), deg2rad(obs_lla[2]),
obs_lla[3])
ts = satellite.t
Δt = ts[2] - ts[1]
julian_date_range = (ts[1], ts[end])
# Allocate space for storage
sat_ranges = Array{Float64}(undef, length(ts))
azs = Array{Float64}(undef, length(ts))
els = Array{Float64}(undef, length(ts))
# Calculate ENU transformation matrix
R_ENU = calcenumatrix(obs_lla)
for i in 1:length(ts)
sat_ecef = satellite.r_ecef[i,:]
# Calculate user-to-sat vector
user_to_sat = sat_ecef - obs_ecef
# Calculate satellite range
sat_range = norm(user_to_sat)
# Normalize `user_to_sat`
user_to_sat_norm = user_to_sat./sat_range
# Transform normalized user-to-sat vector to ENU
enu = R_ENU*user_to_sat_norm # [East, North, Up]
# Calculate satellite azimuth
az = atan(enu[1], enu[2])
# Calculate satellite elevation
el = asin(enu[3]/norm(enu))
# Save values
sat_ranges[i] = sat_range
azs[i] = rad2deg(az)
els[i] = rad2deg(el)
end
return SatelliteRAE(name, satellite, julian_date_range,
obs_lla, obs_ecef, Δt, ts, sat_ranges,
azs, els, satellite.r_ecef)
end
"""
calcelevation(sat_ecef, obs_lla)
Calculates the elevation of an object at a given ECEF coordinate specified by
`sat_ecef`. Returns only `(sat_range, az, el)` instead of `SatelliteRAE` struct.
Format is `(meters, deg, deg)`.
Required Arguments:
- `sat_ecef`: satellite ECEF coordinates with all components in meters
- `obs_lla`: observer latitude, longitude, and height in (deg, deg, meters)
Returns:
- `sat_range`: range between satellite and observer in meters
- `az`: satellite azimuth in degrees
- `el`: satellite elevation in degrees
"""
function calcelevation(sat_ecef, obs_lla)
# Calculate ENU transformation matrix
R_ENU = calcenumatrix(obs_lla)
obs_ecef = GeodetictoECEF(deg2rad(obs_lla[1]), deg2rad(obs_lla[2]),
obs_lla[3])
# Calculate user-to-sat vector
user_to_sat = sat_ecef - obs_ecef
# Calculate satellite range
sat_range = norm(user_to_sat)
# Normalize `user_to_sat`
user_to_sat_norm = user_to_sat./sat_range
# Transform normalized user-to-sat vector to ENU
enu = R_ENU*user_to_sat_norm # [East, North, Up]
# Calculate satellite azimuth
az = atan(enu[1], enu[2])
# Calculate satellite elevation
el = asin(enu[3]/norm(enu))
# Convert from radians to degrees
az = rad2deg(az)
el = rad2deg(el)
return (sat_range, az, el)
end
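# Usage sketch with a made-up ECEF position (a real position would come from an
# orbit propagator or one of the methods above):
#
#   sat_ecef = [2.0e7, 1.0e7, 1.5e7]  # meters
#   sat_range, az, el = calcelevation(sat_ecef, (40.0, -105.0, 1600.0))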
"""
getGPSSatnums(obs_time_JD, prns)
Get the NORAD ID for each PRN for a given observation time in Julian Days. Reads
from the `GPSData` dictionary. Returns a dictionary keyed by PRN.
Required Arguments:
- `obs_time_JD`: observation time in Julian Days
- `prns`: PRNs to get NORAD ID for
Returns:
- `gps_satnums::Dict`: dictionary containing NORAD IDs for each PRN
* dictionary keys are the PRN numbers
"""
function getGPSSatnums(obs_time_JD, prns)
gps_satnums = Dict{Int,Int}()
for prn in prns
for sv in collect(keys(GPSData[prn]))
if GPSData[prn][sv]["active"]
if obs_time_JD >= GPSData[prn][sv]["start_julian_date"]
gps_satnums[prn] = GPSData[prn][sv]["satnum"]
end
else
if ((obs_time_JD > GPSData[prn][sv]["start_julian_date"]) &&
(obs_time_JD < GPSData[prn][sv]["end_julian_date"]))
gps_satnums[prn] = GPSData[prn][sv]["satnum"]
end
end
end
end
return gps_satnums
end
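# Hypothetical usage (assumes the package-level `GPSData` dictionary has been
# loaded; the date and PRNs are illustrative):
#
#   gps_satnums = getGPSSatnums(DatetoJD(2020, 1, 1, 0, 0, 0), [1, 7, 12])
#   gps_satnums[7]  # NORAD ID in use for PRN 7 at that epoch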
"""
getTLEs(obs_time_JD, satnums; Δdays=5)
Query Space-Track.org for TLEs matching time and satnum criteria. Parse and
determine the TLE for each satnum whose epoch is closest to the observation time.
Required Arguments:
- `obs_time_JD`: observation time in Julian Days
- `satnums`: vector of satellite NORAD IDs
Optional Arguments:
- `Δdays`: Number of days to look before and up to `obs_time_JD`
Returns:
- `filtered_tles::Dict{Int,TLE}`: dictionary containing the TLEs for each
satellite NORAD ID
* dictionary keys are the NORAD IDs
"""
function getTLEs(obs_time_JD, satnums; Δdays=5)
obs_time_JD_begin = obs_time_JD - Δdays
obs_time_begin = JDtoDate(obs_time_JD_begin)
obs_time = JDtoDate(obs_time_JD+1)
satnum_list = string(satnums[1])
for i in 2:length(satnums)
satnum_list = string(satnum_list, ",", string(satnums[i]))
end
tle_file = string(homedir(), "/.GNSSTools/tles.tle")
date_range = string(string(obs_time_begin[1], pad=4), "-",
string(obs_time_begin[2], pad=2), "-",
string(obs_time_begin[3], pad=2), "--",
string(obs_time[1], pad=4), "-",
string(obs_time[2], pad=2), "-",
string(obs_time[3], pad=2))
login_url = "https://www.space-track.org/ajaxauth/login"
base_query = "query=https://www.space-track.org/basicspacedata/query/class/tle/EPOCH/"
query_tail = "/orderby/NORAD_CAT_ID/format/3le"
norad_cat_id = "/NORAD_CAT_ID/"
# Get username from user
print("Provide Space-Track.org username: ")
username = readline(stdin)
secret_pass = Base.getpass("Provide Space-Track.org user password")
password = read(secret_pass, String)
run(`curl -o $tle_file $login_url -d identity=$username"""&"""password=$password"""&"""$base_query$date_range$norad_cat_id$satnum_list$query_tail`;
wait=false);
Base.shred!(secret_pass)
tles = read_tle(tle_file)
filtered_tles = Dict{Int,TLE}()
for satnum in satnums
Δts = []
idxs = []
for i in 1:length(tles)
tle = tles[i]
if tle.sat_num == satnum
append!(Δts, abs(tle.epoch-obs_time_JD))
append!(idxs, i)
end
end
if ~isempty(Δts)
min_Δt_idx = argmin(Δts)
filtered_tles[satnum] = tles[idxs[min_Δt_idx]]
else
@warn "$satnum had no TLE results. Increase Δdays (currently set to $Δdays)."
end
end
return filtered_tles
end
#=
Contains code derived from the Python module "Newick" Copyright 2003-2008 by Thomas Mailund, released under the GPL v.2
=#
struct Token
name::Symbol
value::Any
# Constructor that preprocesses `:string` tokens
# TODO: Do the preprocessing elsewhere, this is too specific!
function Token(name::Symbol, str::String)
value = name == :string ? strip(str, ['"', '\'', ' ', '\n']) : str
value = String(value)
new(name, value)
end
end
mutable struct Lexer
input::String
next_token::Union{Nothing, Token}
tokendefs::Vector{Tuple{Symbol,Regex}}
end
"""
Return the current token in the lexer without consuming it.
"""
function peektoken(lxr::Lexer)::Union{Token,Nothing}
isnothing(lxr.next_token) || return lxr.next_token
length(lxr.input) == 0 && return nothing
m = nothing
for (defname, defregex) in lxr.tokendefs
m = match(defregex, lxr.input)
if ! isnothing(m)
lxr.next_token = Token(defname, convert(String, m.match))
return lxr.next_token
end
end
error("Invalid token received")
end
"""
Consume the current token in the lexer and return it.
"""
function next!(lxr::Lexer)::Token
token = lxr.next_token
if isnothing(token)
token = peektoken(lxr)
end
if ! isnothing(token)
# lxr.input = lxr.input[length(token.value) + 1 : end]
lxr.input = lxr.input[nextind(lxr.input, lastindex(token.value)):end]
lxr.next_token = nothing
end
return token
end
function readtoken!(lxr::Lexer, name::Symbol)::Token
token = peektoken(lxr)
if token.name ≠ name
error("Expected a `$(name)` token, but received a `$(token.name)` token. String: $(lxr.input)")
end
next!(lxr)
return token
end
function skiptoken!(lxr::Lexer, name::Symbol)::Lexer
while peektoken(lxr).name == name
readtoken!(lxr, name)
end
return lxr
end
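# Usage sketch with hypothetical token definitions; the real definitions are
# supplied wherever the Lexer is constructed. Regexes are anchored with `^`
# because matching always starts at the head of the remaining input:
#
#   defs = [(:lparen, r"^\("), (:rparen, r"^\)"), (:comma, r"^,"),
#           (:string, r"^[^(),]+")]
#   lxr = Lexer("(A,B)", nothing, defs)
#   peektoken(lxr).name  # => :lparen, without consuming it
#   next!(lxr)           # consumes the :lparen token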
using FTCTests
using Test
@testset "FTCTests.jl" begin
include("privileged_name.jl")
include("run_sim.jl")
include("evaluate.jl")
end
"""
LowRankTransform
```
P = rand(10,5)
tr = LowRankTransform(P)
```
Apply the low-rank projection realised by the matrix `P`
The second dimension of `P` must match the number of features of the target.
"""
struct LowRankTransform{T<:AbstractMatrix{<:Real}} <: Transform
proj::T
end
function set!(t::LowRankTransform{<:AbstractMatrix{T}},M::AbstractMatrix{T}) where {T<:Real}
@assert size(t) == size(M) "Size of the given matrix $(size(M)) and the projection matrix $(size(t)) are not the same"
t.proj .= M
end
params(t::LowRankTransform) = t.proj
Base.size(tr::LowRankTransform,i::Int) = size(tr.proj,i)
Base.size(tr::LowRankTransform) = size(tr.proj) # TODO Add test
function transform(t::LowRankTransform,X::AbstractMatrix{<:Real},obsdim::Int=defaultobs)
@boundscheck size(t,2) != size(X,feature_dim(obsdim)) ?
throw(DimensionMismatch("The projection matrix has size $(size(t)) and cannot be used on X with dimensions $(size(X))")) : nothing
@inbounds _transform(t,X,obsdim)
end
function transform(t::LowRankTransform,x::AbstractVector{<:Real},obsdim::Int=defaultobs) #TODO Add test
@assert size(t,2) == length(x) "Vector has wrong dimensions $(length(x)) compared to projection matrix"
t.proj*x
end
_transform(t::LowRankTransform,X::AbstractVecOrMat{<:Real},obsdim::Int=defaultobs) = obsdim == 2 ? t.proj * X : X * t.proj'
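# Usage sketch (assumes observations stored as rows, i.e. obsdim = 1):
#
#   P = rand(10, 5)
#   tr = LowRankTransform(P)
#   X = rand(20, 5)          # 20 observations with 5 features
#   Y = transform(tr, X, 1)  # 20×10 projected data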
@testset "paretoset" begin
p = hcat(([x,y] for x in 0:0.2:1 for y in 0:0.2:1 if (x-1)^2+(y-1)^2<=1)...)
@test p[:,ParetoSet(p)] == [0.0 0.2 0.4 1.0; 1.0 0.4 0.2 0.0]
@test p[:,ParetoSet(p,[:Min, :Min])] == [0.0 0.2 0.4 1.0; 1.0 0.4 0.2 0.0]
@test p[:,ParetoSet(p,[EfficientGlobalOptimization.Min, EfficientGlobalOptimization.Min])] == [0.0 0.2 0.4 1.0; 1.0 0.4 0.2 0.0]
p = hcat(([x,y] for x in 0:0.2:1 for y in 0:0.2:1 if x^2+y^2<=1)...)
@test p[:,ParetoSet(p, [:Max, :Max])] == [0.0 0.6 0.8 1.0; 1.0 0.8 0.6 0.0]
@test p[:,ParetoSet(p, [EfficientGlobalOptimization.Max, EfficientGlobalOptimization.Max])] == [0.0 0.6 0.8 1.0; 1.0 0.8 0.6 0.0]
end
map_str = ""
width, height = Int32(0), Int32(0)
open("day03/day03.in") do io
global map_str, width, height
for line in readlines(io)
map_str = map_str * line
width = length(line)
height += 1
end
end
function encounters(right::Int64, down::Int64)::Int64
posx, posy = Int64(1+right), Int64(1+down)
tree_count = Int64(0)
while posy <= height
global map_str
if map_str[(posy-1)*width+posx] == '#'
tree_count += 1
end
posx = (posx+right-1) % width + 1
posy = posy+down
end
return tree_count
end
c = encounters(1, 1)
c *= encounters(3, 1)
c *= encounters(5, 1)
c *= encounters(7, 1)
c *= encounters(1, 2)
println(c)

function thermalflowlimits(m::JuMP.AbstractModel, system_formulation::Type{S}, devices::Array{B,1}, time_periods::Int64) where {B <: PowerSystems.Branch, S <: PM.AbstractDCPForm}
fbr = m[:fbr]
name_index = m[:fbr].axes[1]
time_index = m[:fbr].axes[2]
device_index = Dict(value => key for (key, value) in Dict(collect(enumerate([d.name for d in devices]))))
Flow_max_tf = JuMP.JuMPArray(Array{ConstraintRef}(undef,length(name_index), time_periods), name_index, time_index)
Flow_max_ft = JuMP.JuMPArray(Array{ConstraintRef}(undef,length(name_index), time_periods), name_index, time_index)
for t in time_index, (ix, name) in enumerate(name_index)
if name in keys(device_index)
if name == devices[device_index[name]].name
Flow_max_tf[name, t] = @constraint(m, fbr[name, t] <= devices[device_index[name]].rate)
Flow_max_ft[name, t] = @constraint(m, fbr[name, t] >= -1*devices[device_index[name]].rate)
else
@error "Branch name in Array and variable do not match"
end
else
@warn "No flow limit constraint populated for $(name)"
end
end
JuMP.register_object(m, :Flow_max_ToFrom, Flow_max_tf)
JuMP.register_object(m, :Flow_max_FromTo, Flow_max_ft)
return m
end
function thermalflowlimits(m::JuMP.AbstractModel, system_formulation::Type{S}, devices::Array{B,1}, time_periods::Int64) where {B <: PowerSystems.Branch, S <: PM.AbstractDCPLLForm}
fbr_fr = m[:fbr_fr]
fbr_to = m[:fbr_to]
name_index = m[:fbr_fr].axes[1]
time_index = m[:fbr_to].axes[2]
device_index = Dict(value => key for (key, value) in Dict(collect(enumerate([d.name for d in devices]))))
Flow_max_tf = JuMP.JuMPArray(Array{ConstraintRef}(undef,length(name_index), time_periods), name_index, time_index)
Flow_max_ft = JuMP.JuMPArray(Array{ConstraintRef}(undef,length(name_index), time_periods), name_index, time_index)
for t in time_index, (ix, name) in enumerate(name_index)
if name in keys(device_index)
if name == devices[device_index[name]].name
Flow_max_tf[name, t] = @constraint(m, fbr_fr[name, t] <= devices[device_index[name]].rate)
Flow_max_ft[name, t] = @constraint(m, fbr_to[name, t] >= -1*devices[device_index[name]].rate)
else
@error "Branch name in Array and variable do not match"
end
else
@warn "No flow limit constraint populated for $(name)"
end
end
JuMP.register_object(m, :Flow_max_ToFrom, Flow_max_tf)
JuMP.register_object(m, :Flow_max_FromTo, Flow_max_ft)
return m
end
#TODO: Implement Limits in AC. Use Norm from JuMP Implemented norms.

# Note that this script can accept some limited command-line arguments, run
# `julia build_tarballs.jl --help` to see a usage message.
using BinaryBuilder
name = "libsass"
version = v"3.5.5"
# Collection of sources required to build SassBuilder
sources = [
"https://github.com/sass/libsass.git" =>
"39e30874b9a5dd6a802c20e8b0470ba44eeba929",
]
# Bash recipe for building across all platforms
script = raw"""
cd $WORKSPACE/srcdir/libsass
autoreconf --force --install
./configure --prefix=$prefix --host=$target
make -j${nproc}
make install
"""
platforms = [
# glibc Linuces
Linux(:i686),
Linux(:x86_64),
Linux(:aarch64),
Linux(:armv7l),
Linux(:powerpc64le),
# musl Linuces
Linux(:i686, :musl),
Linux(:x86_64, :musl),
Linux(:aarch64, :musl),
Linux(:armv7l, :musl),
# BSDs
MacOS(:x86_64),
FreeBSD(:x86_64),
# Windows
Windows(:i686),
Windows(:x86_64, compiler_abi = CompilerABI(:gcc7))
]
# The products that we will ensure are always built
products(prefix) = [
LibraryProduct(prefix, "libsass", :libsass_so)
]
# Dependencies that must be installed before this package can be built
dependencies = [
]
# Build the tarballs, and possibly a `build.jl` as well.
build_tarballs(ARGS, name, version, sources, script, platforms, products, dependencies)
using BenchmarkTools
BenchmarkTools.DEFAULT_PARAMETERS.samples = 100
function compute(d::Int, n::Int)::Int
function mod1(k::Float64)::Float64
k %= 1
return k < 0.5 ? k : k - 1
end
check(k::Float64)::Bool = (lower && lower_limit ≤ k < upper_limit) || (!lower && lower_limit ≥ k > upper_limit)
limit = (log(d + 1) - log(d)) / log(10)
lower_limit, upper_limit = log(d) / log(10) % 1, log(d + 1) / log(10) % 1
lower = lower_limit < upper_limit
error = log(2) / log(10)
x = error % 1
t, convergent, convergent_error = 0, 1, 1
while abs(convergent_error) > limit
x, integer = modf(1 / x)
convergent, t = convergent * integer + t, convergent
convergent_error = mod1(convergent * error)
end
x, integer = modf(1 / x)
semi_convergent, semi_convergent_error = t, 1
while abs(semi_convergent_error) > limit
semi_convergent += convergent
semi_convergent_error = mod1(semi_convergent * error)
end
differences = (convergent, semi_convergent, convergent + semi_convergent)
if convergent_error > 0
convergent_limit = convergent * ceil(lower_limit / convergent_error)
else
convergent_limit = semi_convergent * ceil(lower_limit / semi_convergent_error)
end
convergents = Vector{Float64}()
while convergent < convergent_limit
append!(convergents, convergent)
convergent, t = integer * convergent + t, convergent
x, integer = modf(1 / x)
end
convergents = convergents[2:end]
for difference ∈ reverse(convergents)
while convergent_limit > difference
x = (convergent_limit - difference) * error % 1
if check(x)
convergent_limit -= difference
else
break
end
end
end
while true
checked = false
for difference ∈ differences
if difference > convergent_limit
continue
end
x = (convergent_limit - difference) * error % 1
if check(x)
checked = true
convergent_limit -= difference
break
end
end
if !checked break end
end
while true
for difference ∈ differences
x = (convergent_limit + difference) * error % 1
if check(x)
n -= 1
if n == 0
return trunc(Int, convergent_limit)
end
convergent_limit += difference
break
end
end
end
end
compute(12, 1)
compute(12, 2)
compute(123, 45)
compute(123, 678910)
@benchmark compute(123, 678910)

# This file is a part of BAT.jl, licensed under the MIT License (MIT).
using Test
Test.@testset "rngs" begin
include("test_rng_init.jl")
end
using ADMPS
using ADMPS: model_tensor, tensorfromclassical
using Test
@testset "exampletensor" begin
β = rand()
@test model_tensor(Ising(β)) ≈ tensorfromclassical([β -β; -β β])
@test mag_tensor(Ising(β)) !== nothing
@test energy_tensor(Ising(β)) !== nothing
end
module Turkie
using AbstractPlotting: Scene, Point2f0
using AbstractPlotting: barplot!, lines!, scatter! # Plotting tools
using AbstractPlotting: Observable, Node, lift, on # Observable tools
using AbstractPlotting: recordframe! # Recording tools
using AbstractPlotting.MakieLayout # Layouting tool
using Colors, ColorSchemes # Colors tools
using KernelDensity # To be able to give a KDE
using OnlineStats # Estimators
using DynamicPPL: VarInfo, Model
export TurkieParams, TurkieCallback
export addIO!, record
include("online_stats_plots.jl")
const std_colors = ColorSchemes.seaborn_colorblind
name(s::Symbol) = string(s)
name(s::OnlineStat) = nameof(typeof(s))
"""
TurkieCallback(model::DynamicPPL.Model, plots::Union{Series, AbstractVector} = [:histkde, Mean(), Variance(), AutoCov(20)]; kwargs...)
## Keyword arguments
- `showtrace=true` : Show the trace of the variable
- `window=1000` : Size of the moving window of samples kept for plotting
"""
TurkieCallback
struct TurkieCallback
scene::Scene
data::Dict{Symbol, MovingWindow}
axis_dict::Dict
vars::Dict{Symbol, Any}
params::Dict{Any, Any}
iter::Observable{Int64}
end
function TurkieCallback(model::Model, plots::Union{Series, AbstractVector} = [:histkde, Mean(), Variance(), AutoCov(20)]; kwargs...)
variables = VarInfo(model).metadata
return TurkieCallback(Dict(Pair.(keys(variables), Ref(plots))),
Dict(kwargs...))
end
function TurkieCallback(varsdict::Dict; kwargs...)
return TurkieCallback(varsdict, Dict(kwargs...))
end
function TurkieCallback(vars::Dict, params::Dict)
# Create a scene and a layout
outer_padding = 5
scene, layout = layoutscene(outer_padding, resolution = (1200, 700))
display(scene)
window = get!(params, :window, 1000)
n_rows = length(keys(vars))
n_cols = maximum(length.(values(vars)))
n_plots = n_rows * n_cols
iter = Node(0)
data = Dict{Symbol, MovingWindow}(:iter => MovingWindow(window, Int64))
obs = Dict{Symbol, Any}()
axis_dict = Dict()
for (i, (variable, plots)) in enumerate(vars)
data[variable] = MovingWindow(window, Float32)
axis_dict[(variable, :varname)] = layout[i, 1, Left()] = Label(scene, string(variable), textsize = 30)
axis_dict[(variable, :varname)].padding = (0, 50, 0, 0)
onlineplot!(scene, layout, axis_dict, plots, iter, data, variable, i)
end
on(iter) do i
if i > 10 # To deal with autolimits a certain number of samples are needed
for (variable, plots) in vars
for p in plots
autolimits!(axis_dict[(variable, p)])
end
end
end
end
MakieLayout.trim!(layout)
TurkieCallback(scene, data, axis_dict, vars, params, iter)
end
function addIO!(cb::TurkieCallback, io)
cb.params[:io] = io
end
function (cb::TurkieCallback)(rng, model, sampler, transition, iteration)
fit!(cb.data[:iter], iteration)
for (vals, ks) in values(transition.θ)
for (k, val) in zip(ks, vals)
if haskey(cb.data, Symbol(k))
fit!(cb.data[Symbol(k)], Float32(val))
end
end
end
cb.iter[] += 1
if haskey(cb.params, :io)
recordframe!(cb.params[:io])
end
end
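# Usage sketch (hypothetical; assumes a Turing.jl model `m` and a sampler call
# that forwards `callback` to every MCMC step):
#
#   cb = TurkieCallback(m)
#   chain = sample(m, NUTS(0.65), 1000; callback = cb)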
end
struct IndividualState #TODO change to immutable
health::HealthState
freedom::FreedomState
detected::DetectionStatus
quarantine_level::SafeUInt8
end
IndividualState() = IndividualState(
Healthy,
Free,
Undetected,
0
)
show(io::IO, s::IndividualState) = print(io, "(",s.health, ", ", s.freedom, ", ", s.detected, ", ", s.quarantine_level, ")")
@testset "SparseRadialMap Multi-threading test with optimization and evaluation of the resulting map" begin
X = Matrix([6.831108232125667 6.831108465893829 10.748176564682273 17.220877261555913;
7.341399771164814 7.341399033871628 11.440770534993137 18.015116463525178;
6.877822405336048 6.877822863906546 10.793381797612751 17.379545045967195;
6.971116690210811 6.971117274254750 10.941318835214862 17.571340756941474;
7.800189373932513 7.800189247976281 11.929084958885008 18.852191934527564;
6.706165535739543 6.706164938380805 10.596868885136731 17.100665224055952;
7.162689725595848 7.162690676866085 11.236535720904731 17.691824497167673;
7.700262639909202 7.700262776963255 11.870709137884843 18.589199307190668;
6.941836510989174 6.941836045832242 10.822143995520504 17.705463568541791;
7.284400905037024 7.284402833890172 11.438244192350945 17.713180862215779;
6.114110264862537 6.114110399157427 9.744642327011279 16.427244631267527;
6.884587321648968 6.884587349033709 10.880879551471059 17.360443337222691;
6.964593674795581 6.964592742775720 11.015485574206627 17.379079978320874;
7.174466645033212 7.174468203234895 11.165089908005559 17.802087293640490;
7.625148752606352 7.625148374481078 11.784628799983244 18.470370177818385;
7.878948627898106 7.878948112320454 12.118366730171301 18.770876038639738;
7.406860218451342 7.406861450644357 11.470902695133232 18.293040279245396;
7.308212626757207 7.308212927902813 11.381148951581917 17.962547003679237;
7.519983057000856 7.519981781231936 11.597728250168640 18.407564589808729;
7.813106349722561 7.813107415959238 11.973288464744240 18.802480702520111]')
order = [[-1], [1; 1], [-1; 1; 0], [-1; 1; 1; 0]]
T = SparseRadialMap(4, order; λ = 0.0)
Tserial = SparseRadialMap(4, order; λ = 0.0)
Tthread = SparseRadialMap(4, order; λ = 0.0)
# Run serial code
optimize(T, X, nothing; start = 2)
# Run serial code
optimize(Tserial, X, nothing; start = 2, P = serial)
# Run multi-threading code
@show Threads.nthreads()
optimize(Tthread, X, nothing; start = 2, P = thread)
@test norm(T(X[:,1]) - [6.831108232125667;
0.07966241537360474;
-0.18141931240603526;
-2.0970508045942893])<1e-8
@test norm(Tserial(X[:,1]) - [6.831108232125667;
0.07966241537360474;
-0.18141931240603526;
-2.0970508045942893])<1e-8
@test norm(Tthread(X[:,1]) - [6.831108232125667;
0.07966241537360474;
-0.18141931240603526;
-2.0970508045942893])<1e-8
end
|
proofpile-julia0005-42523 | {
"provenance": "014.jsonl.gz:242524"
} | ################################################################################
# Copyright 2020, Marta Vanin, Tom Van Acker #
################################################################################
# PowerModelsDistributionStateEstimation.jl #
# An extention package of PowerModels(Distribution).jl for Static Power System #
# State Estimation. #
################################################################################
# using pkgs
using Distributions, GaussianMixtures
using HDF5
using Ipopt
using PowerModels, PowerModelsDistribution
using PowerModelsDistributionStateEstimation
#using SCS #removed while SDP tests are not active
using Test
# pkg const
const _DST = Distributions
const _GMM = GaussianMixtures
const _PMD = PowerModelsDistribution
const _PMDSE = PowerModelsDistributionStateEstimation
#network and feeder from ENWL for tests
ntw, fdr = 4, 2
season = "summer"
time_step = 144
elm = ["load", "pv"]
pfs = [0.95, 0.90]
rm_transfo = true
rd_lines = true
# set solvers
ipopt_solver = optimizer_with_attributes(Ipopt.Optimizer,"max_cpu_time"=>300.0,
"tol"=>1e-9,
"print_level"=>0)
# scs_solver = optimizer_with_attributes(SCS.Optimizer, "max_iters"=>20000, "eps"=>1e-5,
# "alpha"=>0.4, "verbose"=>0) #deactivated while SDP tests not active
@testset "PowerModelsDistributionStateEstimation" begin
include("bad_data.jl")
include("distributions.jl")
include("estimation_criteria.jl")
include("mixed_measurements.jl")
include("non_exact_forms.jl")
include("power_flow.jl")
include("pseudo_measurements.jl")
include("single_conductor_branches.jl")
include("utils_and_start_val.jl")
include("with_errors.jl")
end
module Day20
using AdventOfCode2019
function day20(input::String = readInput(joinpath(@__DIR__, "..", "data", "day20.txt")))
return solve(input, (true, true))
end
function solve(input, parts)
solution = Array{Int,1}(undef, sum(parts))
cm = char_matrix(input)
imat = int_matrix(cm)
specialPoints = find_portals(cm)
portals = create_portal_lookup(specialPoints)
start = specialPoints["AA"][1]
finish = specialPoints["ZZ"][1]
if parts[1]
solution[1] = flood!(copy(imat), portals, start, finish)
end
if parts[2]
solution[2] = flood_levels(imat, portals, start, finish)
end
return solution
end
function char_matrix(input::String)
m = split.(split(input, "\n"), "")
nrows = length(m)
nrows > 1 || throw(ArgumentError("Invalid input"))
ncols = length(m[1])
if length(m[end]) <= 1
nrows -= 1
end
for i = 1:nrows
length(m[i]) == ncols || throw(ArgumentError("Invalid input in row $i"))
end
charmat = Array{Char, 2}(undef, nrows, ncols)
for i = 1:nrows
for (j, s) in enumerate(m[i])
charmat[i, j] = m[i][j][1]
end
end
return charmat
end
function int_matrix(charmat::Array{Char,2})
m, n = size(charmat)
imat = Array{Int,2}(undef, m, n)
for i = 1:m
for j = 1:n
if charmat[i, j] == '.' # walkable square
imat[i, j] = -1
elseif isuppercase(charmat[i, j]) # letter
imat[i, j] = -2
else # non-walkable square
imat[i, j] = -3
end
end
end
return imat
end
function find_portals(charmat::Array{Char,2})
letterDict = Dict{String,Array{CartesianIndex,1}}()
letterPos = findall(x -> isuppercase(x), charmat)
m, n = size(charmat)
for ci in letterPos
i, j = ci.I
if j + 1 <= n && isuppercase(charmat[i, j + 1])
s = join(charmat[i, j:j+1])
if !haskey(letterDict, s)
letterDict[s] = Array{CartesianIndex,1}()
end
if j + 2 <= n && charmat[i, j + 2] == '.'
push!(letterDict[s], CartesianIndex(i, j + 2))
elseif j - 1 >= 0 && charmat[i, j - 1] == '.'
push!(letterDict[s], CartesianIndex(i, j - 1))
end
end
if i + 1 <= m && isuppercase(charmat[i + 1, j])
s = join(charmat[i:i+1, j])
if !haskey(letterDict, s)
letterDict[s] = Array{CartesianIndex,1}()
end
if i + 2 <= m && charmat[i + 2, j] == '.'
push!(letterDict[s], CartesianIndex(i + 2, j))
elseif i - 1 >= 0 && charmat[i - 1, j] == '.'
push!(letterDict[s], CartesianIndex(i - 1, j))
end
end
end
return letterDict
end
function create_portal_lookup(letterDict::Dict{String,Array{CartesianIndex,1}})
lookup = Dict{CartesianIndex,CartesianIndex}()
for (k, v) in letterDict
(k == "AA" || k == "ZZ") && continue
length(v) == 2 || throw(AssertionError("Too many indices for portal $k"))
lookup[v[1]] = v[2]
lookup[v[2]] = v[1]
end
return lookup
end
function flood!(imat::Array{Int,2}, portals::Dict{CartesianIndex,CartesianIndex}, start::CartesianIndex, finish::CartesianIndex)
imat[start] = 0
queue = Array{CartesianIndex,1}()
push!(queue, start)
while length(queue) > 0
current = popfirst!(queue)
if current == finish
return imat[current]
end
for (k, l) in ((0, 1), (0, -1), (1, 0), (-1, 0))
if imat[(current.I .+ (k, l))...] == -1
p = CartesianIndex(current.I .+ (k, l))
imat[p] = imat[current] + 1
push!(queue, p)
end
end
if haskey(portals, current)
p = portals[current]
if imat[p] == -1
imat[p] = imat[current] + 1
push!(queue, p)
end
end
end
end
function is_outer(c::CartesianIndex, m::Int, n::Int)
(c[1] == 3 || c[1] == m - 2) && return true
(c[2] == 3 || c[2] == n - 2) && return true
return false
end
function flood_levels(imat::Array{Int,2}, portals::Dict{CartesianIndex,CartesianIndex}, start::CartesianIndex, finish::CartesianIndex)
maze = Dict{Int,Array{Int,2}}()
maze[0] = copy(imat)
maze[0][start] = 0
m, n = size(imat)
queue = Array{Tuple{CartesianIndex,Int},1}()
level = 0
push!(queue, (start, level))
while length(queue) > 0
current, level = popfirst!(queue)
if level == 0 && current == finish
return maze[level][current]
end
for (k, l) in ((0, 1), (0, -1), (1, 0), (-1, 0))
if maze[level][(current.I .+ (k, l))...] == -1
p = CartesianIndex(current.I .+ (k, l))
maze[level][p] = maze[level][current] + 1
push!(queue, (p, level))
end
end
if haskey(portals, current)
if !is_outer(current, m, n)
p = portals[current]
if !haskey(maze, level + 1)
maze[level + 1] = copy(imat)
end
if maze[level + 1][p] == -1
maze[level + 1][p] = maze[level][current] + 1
push!(queue, (p, level + 1))
end
elseif level > 0 # only inner "portals" allowed for level 0
p = portals[current]
if maze[level - 1][p] == -1
maze[level - 1][p] = maze[level][current] + 1
push!(queue, (p, level - 1))
end
end
end
end
end
end # module
using DataFrames
using Gadfly
# Read the datasource
include(Pkg.dir("BismarkSummary","src","bismark-report.jl"))
#using BismarkSummary
sampleinfo_path = joinpath(Pkg.dir(), "BismarkSummary","testdata","datasource.tsv")
pipeline_name = nothing
run_number = nothing
# Get filenames of bismark reports
test_bismark_report1 = joinpath(Pkg.dir(), "BismarkSummary","testdata","bismark_report.txt")
test_bismark_report2 = joinpath(Pkg.dir(), "BismarkSummary","testdata","bismark_report1.txt")
test_bismark_report3 = joinpath(Pkg.dir(), "BismarkSummary","testdata","bismark_report2.txt")
bismark_report_filenames = [test_bismark_report1,test_bismark_report2,test_bismark_report3]
bismark_report_filenames
sampleinfo = readtable(sampleinfo_path)
report_dict=parse_bismark_reports( bismark_report_filenames)
methods(parse_bismark_reports)
append_report_info_to_sampleinfo!(sampleinfo,report_dict)
report_dict
sampleinfo
Gadfly.plot(sampleinfo,x="sex",y="Mapping efficiency", Geom.boxplot)
# function Crst_direct(r::Int, s::Int, t::Int)
# # direct method to calculate Crst
# # see Products of Laguerre Polynomials
# # Joseph Gillis and George Weiss
# # Mathematics of Computation, Vol. 14, No. 69 (Jan., 1960), pp. 60-63
# # this function implements equation (7)
# # this is computationally expensive.
# # use recurrence relation instead.
# (r >= 0 && s >= 0 && t >= 0) || throw("negative orders not allowed.")
# # t needs to be in |r-s| and r+s
# # otherwise the coef is zero
# if t < abs(r - s) || t > r + s
# return 0
# end
# p = r + s - t
# # define nmax and nmin and find them
# nmax = r
# if s < nmax
# nmax = s
# end
# if p < nmax
# nmax = p
# end
# nmin = ceil(Int, p / 2)
# # use rational numbers for now to keep it exact.
# C = 0 // 1
# for n in nmin:nmax
# C += (2 // 1)^(2 * n) * factorial(r + s - n) / ( factorial(r - n) * factorial(s - n) * factorial(2n - p) * factorial(p - n))
# end
# # prefactor
# C *= (-1 // 2)^p
# end
function Crst(r::Int, s::Int, t::Int)
# this implements equation (22) and (23) in the same paper.
if r < 0 || s < 0 || t < 0
return 0 // 1
end
# (r >= 0 && s >= 0 && t >= 0) || throw("negative orders not allowed.")
# notice that C_{rst} is symmetric with respect to any swapping of indices.
# take r to be the larger one of r and s
# the recurrence is given for t+1
# shift it to calculate for t;
if r < s
r, s = s, r # swap.
end
# t needs to be between |r-s| and r+s
# otherwise the coef is zero
if t < r - s || t > r + s
return 0 // 1
end
sigma = r + s + 1 // 1
delta = r - s
if t == delta
return Rational(binomial(r, s))
end
# handle general case.
f1 = t * (delta^2 - t^2)
f2 = (2t - 1) * (delta^2 - (t - 1)^2 + t * (sigma - t ))
f3 = -(t - 1) * (delta^2 - (t - 2)^2 + (4t - 3) * (sigma - t + 1))
f4 = 2(t - 1) * (t - 2) * (sigma - t + 2)
return (f2 * Crst(r, s, t - 1) + f3 * Crst(r, s, t - 2) + f4 * Crst(r, s, t - 3) ) / f1
end
function laprodexpand(r::Int, s::Int)
# expand the product of two Laguerre polynomials (with coefficients being 1) into a linear
# combination of Laguerre polynomials.
# Lr(x)Ls(x) into sum_{t}C_{rst}Lt(x)
orders = abs(r - s):(r + s) |> collect
coeffs = [Crst(r, s, t) for t in orders]
return orders, coeffs
end
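# Worked example: with L1(x) = 1 - x and L2(x) = 1 - 2x + x^2/2,
# L1(x)^2 = (1 - x)^2 = 1*L0(x) - 2*L1(x) + 2*L2(x), so
#
#   laprodexpand(1, 1)  # => ([0, 1, 2], [1//1, -2//1, 2//1])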
function Base.:*(l1::LaguerrePolynomial{T}, l2::LaguerrePolynomial{S}) where {T,S}
# take two linear combinations of Laguerre polynomials.
# and reexpand into a single Laguerre polynomial series.
lst = []
R = promote_type(T, S)
for (r, c1) in l1
for (s, c2) in l2
# construct the product
orders, coeffs = laprodexpand(r, s)
# convert to type R
coeffs = R.(coeffs)
# add this to the list
la = LaguerrePolynomial(orders, coeffs)
la = la * c1 * c2
push!(lst, la)
end
end
# add elements in the list.
la = lst[1]
if length(lst) > 1
for i = 2:length(lst)
la += lst[i]
end
end
return la
end

using Flux: convfilter, Zeros, outdims
# Build a VGG block
# ifilters: number of input filters
# ofilters: number of output filters
# batchnorm: add batchnorm
function vgg_block(ifilters, ofilters, depth, batchnorm)
k = (3,3)
p = (1,1)
layers = []
for l in 1:depth
if batchnorm
w = convfilter(k, ifilters=>ofilters)
b = Zeros()
push!(layers, Conv(weight=w, bias=b, pad=p))
push!(layers, BatchNorm(ofilters, relu))
else
push!(layers, Conv(k, ifilters=>ofilters, relu, pad=p))
end
ifilters = ofilters
end
return layers
end
# Build convolutionnal layers
# config: :A (vgg11) :B (vgg13) :D (vgg16) :E (vgg19)
# inchannels: number of channels in input image (3 for RGB)
function convolutional_layers(config, batchnorm, inchannels)
layers = []
ifilters = inchannels
for c in config
push!(layers, vgg_block(ifilters, c..., batchnorm)...)
push!(layers, MaxPool((2,2)))
ifilters, _ = c
end
return layers
end
# Build classification layers
# imsize: image size (w, h, c)
# nclasses: number of classes
# fcsize: size of fully connected layers (usefull for smaller nclasses than ImageNet)
# dropout: dropout obviously
function classifier_layers(imsize, nclasses, fcsize, dropout)
layers = []
push!(layers, flatten)
push!(layers, Dense(Int(prod(imsize)), fcsize, relu))
push!(layers, Dropout(dropout))
push!(layers, Dense(fcsize, fcsize, relu))
push!(layers, Dropout(dropout))
push!(layers, Dense(fcsize, nclasses))
push!(layers, softmax)
return layers
end
function vgg(imsize; config, inchannels, batchnorm=false, nclasses, fcsize, dropout)
conv = convolutional_layers(config, batchnorm, inchannels)
imsize = foldl((insize, layer) -> outdims(layer, insize), conv; init = imsize)
class = classifier_layers((imsize..., config[end][1]), nclasses, fcsize, dropout)
return Chain(conv..., class...)
end
const configs = Dict(:A => [(64,1), (128,1), (256,2), (512,2), (512,2)],
:B => [(64,2), (128,2), (256,2), (512,2), (512,2)],
:D => [(64,2), (128,2), (256,3), (512,3), (512,3)],
:E => [(64,2), (128,2), (256,4), (512,4), (512,4)])
vgg11(imsize; inchannels=3, nclasses=1000, fcsize=4096, dropout=0.5) =
vgg(imsize, config=configs[:A], inchannels=inchannels, nclasses=nclasses, fcsize=fcsize, dropout=dropout)
vgg11bn(imsize; inchannels=3, nclasses=1000, fcsize=4096, dropout=0.5) =
vgg(imsize, config=configs[:A], batchnorm=true, inchannels=inchannels, nclasses=nclasses, fcsize=fcsize, dropout=dropout)
vgg13(imsize; inchannels=3, nclasses=1000, fcsize=4096, dropout=0.5) =
vgg(imsize, config=configs[:B], inchannels=inchannels, nclasses=nclasses, fcsize=fcsize, dropout=dropout)
vgg13bn(imsize; inchannels=3, nclasses=1000, fcsize=4096, dropout=0.5) =
vgg(imsize, config=configs[:B], batchnorm=true, inchannels=inchannels, nclasses=nclasses, fcsize=fcsize, dropout=dropout)
vgg16(imsize; inchannels=3, nclasses=1000, fcsize=4096, dropout=0.5) =
vgg(imsize, config=configs[:D], inchannels=inchannels, nclasses=nclasses, fcsize=fcsize, dropout=dropout)
vgg16bn(imsize; inchannels=3, nclasses=1000, fcsize=4096, dropout=0.5) =
vgg(imsize, config=configs[:D], batchnorm=true, inchannels=inchannels, nclasses=nclasses, fcsize=fcsize, dropout=dropout)
vgg19(imsize; inchannels=3, nclasses=1000, fcsize=4096, dropout=0.5) =
vgg(imsize, config=configs[:E], inchannels=inchannels, nclasses=nclasses, fcsize=fcsize, dropout=dropout)
vgg19bn(imsize; inchannels=3, nclasses=1000, fcsize=4096, dropout=0.5) =
vgg(imsize, config=configs[:E], batchnorm=true, inchannels=inchannels, nclasses=nclasses, fcsize=fcsize, dropout=dropout)
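# Usage sketch (illustrative image sizes and class counts):
#
#   model = vgg11((224, 224))                          # ImageNet-sized input, 1000 classes
#   small = vgg16bn((32, 32); nclasses=10, fcsize=512) # e.g. a CIFAR-10-sized variant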
################################################################################
# CutPruners
# A package to manage polyhedral convex functions
################################################################################
module CutPruners
using LinearAlgebra, SparseArrays
using JuMP
# Redudancy checking
include("redund.jl")
include("abstract.jl")
include("avg.jl")
include("decay.jl")
include("dematos.jl")
include("exact.jl")
end # module
using DrWatson
@quickactivate "FractalDimension"
include(srcdir("style.jl"))
using PyPlot, DataFrames, CSVFiles
# %% computation times for different lengths
data = wload(datadir("benchmark", "benchmark_number_NofN=10_base=10_lowerN=3_upperN=5.csv")) |> DataFrame
fig, axs = subplots(2,1; sharex = true, figsize = (10, 10))
for (i, name) in enumerate(unique(data.Method))
color = "C$(i-1)"
lorenz_data = data[(data.Model .== "lorenz") .& (data.Method .== name), :]
axs[1].plot(lorenz_data.N, lorenz_data.Time, label = name, color = color)
hénon_data = data[(data.Model .== "henon") .& (data.Method .== name), :]
axs[2].plot(hénon_data.N, hénon_data.Time, color = color)
end
axs[2].set_xlabel("\$N\$")
axs[2].set_ylabel("\$t_{Hénon}[\\mathrm{ns}]\$")
axs[1].set_ylabel("\$t_{Lorenz63}[\\mathrm{ns}]\$")
axs[1].set_yscale("log")
axs[2].set_yscale("log")
axs[1].set_xscale("log")
axs[2].set_xscale("log")
for ax in axs; ax.legend(loc = "upper left"); ax.grid(); end
axs[1].tick_params(labelbottom=false)
axs[1].set_title("minimum computation time")
wsave(plotsdir("benchmarks", "comparing_btime_N.png"), fig)
# %% computation times for different system dimensions
data = wload(datadir("benchmark", "benchmark_dim_N=10000_ddim=1_lowerdim=4_upperdim=16.csv")) |> DataFrame
fig, axs = subplots(1,1; sharex = true, figsize = (10, 16))
for (i, method) in enumerate(unique(data.Method))
color = "C$(i-1)"
x = data[data.Method .== method, :Dimension]
y = data[data.Method .== method, :Time]
axs.plot(x, y, color = color, label = method)
end
axs.set_xlabel("\$\\mathrm{Dimension}\$")
axs.set_ylabel("\$t [\\mathrm{ns}]\$")
axs.set_yscale("log")
axs.legend(loc = "upper left")
axs.grid()
axs.set_title("minimum computation time (N=10000)")
wsave(plotsdir("benchmarks", "comparing_btime_dim.png"), fig)
# %% comparison of computation times for length and dimensionality
path1 = "benchmark_dim_N=50000_ddim=1_lowerdim=4_upperdim=16.csv"
path2 = "benchmark_number_NofN=10_base=10_lowerN=3_upperN=5.csv"
titlename = "\$ \\mathrm{Computation}\\;\\mathrm{Time}\\;\\mathrm{for}\\;\\mathrm{Calculation}\\;\\mathrm{of}\\;\\mathrm{Probabilities} \$"
labels = [
"\$ C_2^\\mathrm{classic} \$",
"\$ C_2^\\mathrm{tree} \$",
"\$ C_2^\\mathrm{box} \$",
"\$ C_2^\\mathrm{prism} \$",
"\$ H_1^\\mathrm{Molteno} \$",
"\$ H_1^\\mathrm{classic} \$",
]
function plot_dim_length(title, labels, path1, path2; save_figure = true)
data_dim = wload(
datadir(
"benchmark",
path1,
)) |> DataFrame
data_var = wload(
datadir(
"benchmark",
path2,
)) |> DataFrame
data_length = data_var[(data_var.Model .== "lorenz"), :]
fig, axs = subplots(2,1; sharex = false)
c_names = Dict()
ls_names = Dict()
agents = []
for (i, method) in enumerate(unique(data_dim.Method))
color = "C$(i-1)"
ls = LINESTYLES[i]
c_names[method] = color
ls_names[method] = ls
x = data_dim[data_dim.Method .== method, :Dimension]
y = data_dim[data_dim.Method .== method, :Time]
agent, = axs[1].plot(x, y, color = color, label = method, ls = ls)
push!(agents, agent)
end
for method in unique(data_length.Method)
if method == "Correlation"
color = c_names["Classic Correlation"]
ls = ls_names["Classic Correlation"]
elseif method == "Generalized Dimension"
color = c_names["Generalized Entropy"]
ls = ls_names["Generalized Entropy"]
else
color = c_names[method]
ls = ls_names[method]
end
x = data_length[data_length.Method .== method, :N]
y = data_length[data_length.Method .== method, :Time]
axs[2].plot(x, y, color = color, label = nothing, ls = ls)
end
for ax in axs; ax.set_yscale("log"); ax.grid(true); end
axs[1].set_xlabel("\$ \\mathrm{Dimension} \$")
axs[1].set_xticks(4:4:16)
axs[1].set_xlim((4,16))
axs[1].set_title("\$ \\mathrm{Lorenz-96} \\quad N=50000 \$")
axs[1].set_ylabel("\$t [\\mathrm{ns}]\$")
axs[1].set_yticks(10 .^ (7:12))
leg = axs[1].legend(
handles = agents,
labels = labels,
bbox_to_anchor=(0., 1.25, 1., .102), loc="lower left",
ncol=3, mode="expand", borderaxespad=0, handlelength=2,
title = title,
)
axs[2].set_xlabel("\$\\mathrm{N}\$")
axs[2].set_yticks(10 .^ (5:2:12))
axs[2].set_xlim((10^3, 10^5))
axs[2].set_xscale("log")
axs[2].set_title("\$ \\mathrm{Lorenz-63} \$")
#axs[2].legend().remove()
axs[2].set_ylabel("\$t [\\mathrm{ns}]\$")
axs[1].add_artist(leg)
fig.tight_layout()
fig.subplots_adjust(top = 1.0, hspace = 0.5, left = 0.2)
fig
end
fig = plot_dim_length(titlename, labels, path1, path2)
fig.tight_layout(;pad = 0.3)
wsave(plotsdir("benchmarks", "comparing_btime_dim_N.png"), fig)
using FSM
using CSV
using DataFrames
using Plots
data_force = CSV.File("data/met_CdP_0506.csv") |> DataFrame
data_ref = CSV.File("fortran/output/out_CdP_0506_11111.txt", header=["year", "month", "day", "hour", "alb", "Roff", "snowdepth", "SWE", "Tsurf", "Tsoil"], delim=" ", ignorerepeated=true) |> DataFrame
input = Input{Float64}(
data_force.year,
data_force.month,
data_force.day,
data_force.hour,
data_force.SW,
data_force.LW,
data_force.Sf,
data_force.Rf,
data_force.Ta,
data_force.RH,
data_force.Ua,
data_force.Ps,
)
ebm = EBM{Float64}(
am=1,
cm=1,
dm=1,
em=1,
hm=1,
zT=1.5,
zvar=false,
Tsoil=[282.98, 284.17, 284.70, 284.70]
)
cn = Constants{Float64}()
snowdepth = similar(input.Ta)
SWE = similar(input.Ta)
Tsurf = similar(input.Ta)
run!(ebm, cn, snowdepth, SWE, Tsurf, input)
plot(data_ref.SWE - SWE)
plot(data_ref.Tsurf - Tsurf .+ 273.15)
if !("." in LOAD_PATH)
push!(LOAD_PATH, ".")
end
import Coverage
import Plots
# https://github.com/jheinen/GR.jl/issues/278#issuecomment-587090846
ENV["GKSwstype"] = "nul"
all_cases = [
# "ARM_SGP",
# "Bomex",
# "DryBubble",
# "DYCOMS_RF01",
# "GABLS",
# "GATE_III",
# "life_cycle_Tan2018",
# "Nieuwstadt",
"Rico",
# "Soares",
# "SP",
"TRMM_LBA",
"LES_driven_SCM",
]
filter!(x -> x ≠ "GATE_III", all_cases) # no mse tables for GATE_III
filter!(x -> x ≠ "SP", all_cases) # not currently running SP
allocs = Dict()
for case in all_cases
ENV["ALLOCATION_CASE_NAME"] = case
run(`julia --project=test/ --track-allocation=user perf/alloc_per_case.jl`)
allocs[case] = Coverage.analyze_malloc(".")
# Clean up files
all_files = [joinpath(root, f) for (root, dirs, files) in Base.Filesystem.walkdir(".") for f in files]
all_mem_files = filter(x -> endswith(x, ".mem"), all_files)
for f in all_mem_files
rm(f)
end
end
@info "Post-processing allocations"
function plot_allocs(case_name, allocs_per_case, n_unique_bytes)
p = Plots.plot()
@info "Allocations for $case_name"
filename_only(fn) = first(split(fn, ".jl")) * ".jl"
function compile_tc(fn, linenumber)
c1 = endswith(filename_only(fn), "TurbulenceConvection.jl")
c2 = linenumber == 1
return c1 && c2
end
filter!(x -> x.bytes ≠ 0, allocs_per_case)
filter!(x -> !compile_tc(x.filename, x.linenumber), allocs_per_case)
for alloc in allocs_per_case
println(alloc)
end
println("Number of allocating sites: $(length(allocs_per_case))")
case_bytes = getproperty.(allocs_per_case, :bytes)[end:-1:1]
case_filename = getproperty.(allocs_per_case, :filename)[end:-1:1]
case_linenumber = getproperty.(allocs_per_case, :linenumber)[end:-1:1]
all_bytes = Int[]
filenames = String[]
linenumbers = Int[]
loc_ids = String[]
for (bytes, filename, linenumber) in zip(case_bytes, case_filename, case_linenumber)
compile_tc(filename, linenumber) && continue # Skip loading module
loc_id = "$(filename_only(filename))" * "$linenumber"
if !(bytes in all_bytes) && !(loc_id in loc_ids)
push!(all_bytes, bytes)
push!(filenames, filename)
push!(linenumbers, linenumber)
push!(loc_ids, loc_id)
if length(all_bytes) ≥ n_unique_bytes
break
end
end
end
all_bytes = all_bytes ./ 10^3
max_bytes = maximum(all_bytes)
@info "$case_name: $all_bytes"
xtick_name(filename, linenumber) = "$filename, line number: $linenumber"
markershape = (:square, :hexagon, :circle, :star, :utriangle, :dtriangle)
for (bytes, filename, linenumber) in zip(all_bytes, filenames, linenumbers)
Plots.plot!(
[0],
[bytes];
seriestype = :scatter,
label = xtick_name(filename_only(filename), linenumber),
markershape = markershape[1],
markersize = 1 + bytes / max_bytes * 10,
)
markershape = (markershape[end], markershape[1:(end - 1)]...)
end
p1 = Plots.plot!(ylabel = "Allocations (KB)", title = case_name)
p2 = Plots.plot(
1:length(allocs_per_case),
getproperty.(allocs_per_case, :bytes)[end:-1:1] ./ 1000;
xlabel = "i-th allocating line (sorted)",
ylabel = "Allocations (KB)",
markershape = :circle,
)
Plots.plot(p1, p2, layout = Plots.grid(2, 1))
Plots.savefig(joinpath(folder, "allocations_$case_name.png"))
end
folder = "perf/allocations_output"
mkpath(folder)
@info "Allocated bytes for single tendency per case:"
for case in all_cases
plot_allocs(case, allocs[case], 10)
end
"""
Rimu.EmbarrassinglyDistributed
Module that provides an embarrassingly parallel option for breaking up long
time-series with `lomc!()` into chunks performed in parallel using the
`Distributed` package.
### Exports:
* [`d_lomc!()`](@ref) - run [`lomc!()`](@ref) in embarrassingly parallel mode
* [`combine_dfs()`](@ref) - combine the resulting `DataFrame`s to a single one
* [`setup_workers()`](@ref) - set up workers for distributed computing
* [`seedCRNGs_workers!()`](@ref) - seed random number generators for distributed computing
"""
module EmbarrassinglyDistributed
using Random, Parameters, DataFrames
using Rimu, Rimu.ConsistentRNG
using Distributed
export d_lomc!, setup_workers, seedCRNGs_workers!, combine_dfs
"""
seedCRNGs_workers!([seed])
Seed the random number generators `CRNG` from [`ConsistentRNG`](@ref) on all
available processes in a `Distributed` environment deterministically from `seed`
but such that their pseudo-random number sequences are statistically
independent.
If no `seed` is given, obtain one from system entropy
(with [`Random.RandomDevice()`](@ref)).
"""
function seedCRNGs_workers!(seed=rand(Random.RandomDevice(),UInt))
@everywhere seedCRNG!($seed + hash(myid()))
end
"""
d_lomc!(ham, v; eqsteps, kwargs...)
-> (; dfs = DataFrame[], eqsteps)
Perform linear operator Monte Carlo with [`lomc!()`](@ref) in embarrassingly parallel
mode using `Distributed` computing. Returns all dataframes.
### Keyword arguments in addition to those of [`lomc!()`](@ref):
* `eqsteps` - Number of time steps used for equilibration. Each worker will run an independent simulation with `eqstep + (laststep - step) ÷ nworkers()` time steps.
### Example:
```julia
using Rimu, Rimu.EmbarrassinglyDistributed
setup_workers(4) # set up to run on 4 workers
seedCRNGs_workers!(127) # seed random number generators for deterministic evolution
add = BoseFS((1,1,0,1))
ham = HubbardReal1D(add, u=4.0)
v = DVec(add => 2, capacity = 200)
# run `lomc!()` for 20_100 time steps by
# performing 4 parallel runs of `lomc!()` with 5_100 time steps each and
# stiching the results together into a single dataframe:
df, eqsteps = d_lomc!(ham, v; eqsteps = 100, laststep = 20_100) |> combine_dfs
# or
energies = d_lomc!(ham, v; eqsteps = 100, laststep = 20_100) |> combine_dfs |> autoblock
```
### See also:
* [`setup_workers()`](@ref)
* [`seedCRNGs_workers!()`](@ref)
* [`combine_dfs()`](@ref)
"""
function d_lomc!(ham, v;
eqsteps,
laststep = nothing,
params::Rimu.FciqmcRunStrategy = Rimu.RunTillLastStep(),
kwargs...
)
nw = nworkers()
nw < 2 && @warn "Not enough workers available for parallel execution" nw
if !isnothing(laststep)
params.laststep = laststep
end
# unpack the parameters:
@unpack step, laststep = params
@assert laststep - step - eqsteps > nw "not enough time steps to run for"
psteps = laststep - step - eqsteps # steps to be run after equilibration
stepseach = psteps ÷ nw # to be run on each worker after equilibration
# @everywhere do_it() = Rimu.lomc!($ham, sizehint!($v,($vc*3)>>1); $kwargs..., laststep = $step+$stepseach, params = $params, threading = false).df
# start shorter jobs in parallel
# futures = [@spawnat(p, Main.do_it()) for p in workers()]
futures = [@spawnat(p, Rimu.lomc!(ham, v; kwargs..., laststep = step+eqsteps+stepseach, params = params, threading = false).df) for p in workers()]
# futures = [@spawnat(p, Rimu.lomc!(ham, v; kwargs..., laststep = step+stepseach, params = params, threading = false).df) for p in workers()]
dfs = fetch.(futures) # will now be an array of dataframes
return (; dfs, eqsteps) # returns NamedTuple
end
"""
combine_dfs(dfs::AbstractVector, eqsteps)
combine_dfs((dfs, eqsteps))
Return a dataframe with compounded data from
the parallel runs assuming that after `eqsteps` time steps the runs are
equilibrated and uncorrelated.
"""
combine_dfs(tuple) = combine_dfs(tuple...) # for easy chaining
function combine_dfs(dfs::AbstractVector, eqsteps)
# add worker id to dfs
for i in eachindex(dfs)
df = dfs[i]
df.workerid = [workers()[i] for n in 1:size(df)[1]]
df.stepsorg = copy(df.steps) # remember original time step
end
# check that the dataframes are all the same size:
@assert mapreduce(isequal(size(dfs[1])), &, size.(dfs))
# last time step recorded in the first dataframe
s = dfs[1].steps[end]
# create views of all the remaining dataframes starting after the eqsteps
lelem = size(dfs[1])[1]
df_adds = [view(dfs[i], (eqsteps+2):lelem, :) for i in 2:length(dfs)]
# now number the time steps in the views consecutively
for df in df_adds
df[:,:steps] .= [s+i for i in 1:length(df.steps)]
s = df.steps[end]
end
dfm = vcat(dfs[1],df_adds...)
return (; df=dfm, eqsteps)
end # d_lomc!()
"""
setup_workers([nw])
Set up and prepare `nw` workers for distributed computing. If `nw` is not given,
but multiple threads are available then as many workers will be prepared. The
`Rimu` package code will be made available on all workers.
"""
function setup_workers(nw = nothing)
# organise the right number of workers
if isnothing(nw)
# if `nw` is not specified, use at least as many workers are threads are
# available
while Threads.nthreads() > nworkers()
addprocs(1)
end
nw = nworkers()
else
while nw > nworkers()
addprocs(1)
end
end
@assert nw ≥ 1 "not enough workers to run in parallel"
while nworkers() > nw
# reduce the number of workers if it is larger than `nw`
rmprocs(workers()[end])
end
# just to check that we have the right number of workers now:
@assert nw == nworkers()
    @eval @everywhere using Rimu # load code on all workers
return nw
end
# function __init__()
# if myid() == 1
# nw = setup_workers()
#         @info "`EmbarrassinglyDistributed`: $nw workers set up and ready to go"
# end
# end
end # module
|
proofpile-julia0005-42533 | {
"provenance": "014.jsonl.gz:242534"
} | Base.one(::Type{P}) where {C, T, P <: Polynomial{C, T}} = Polynomial(one(T))
Base.one(p::Polynomial) = one(typeof(p))
Base.zero(::Type{Polynomial{C, T, A}}) where {C, T, A} = Polynomial(A())
Base.zero(t::PolynomialLike) = zero(typeof(t))
combine(t1::Term, t2::Term) = combine(promote(t1, t2)...)
combine(t1::T, t2::T) where {T <: Term} = Term(t1.coefficient + t2.coefficient, t1.monomial)
compare(t1::Term, t2::Term) = monomial(t1) > monomial(t2)
# Graded Lexicographic order
# First compare total degree, then lexicographic order
function isless(m1::Monomial{V}, m2::Monomial{V}) where {V}
d1 = degree(m1)
d2 = degree(m2)
if d1 < d2
return true
elseif d1 > d2
return false
else
return exponents(m1) < exponents(m2)
end
end
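# Illustrative consequence of this ordering (hypothetical variables x, y):
# total degree is compared first, so x*y^2 (degree 3) sorts above x^2
# (degree 2), and only ties in total degree fall back to the lexicographic
# comparison of the exponent tuples.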
jointerms(terms1::AbstractArray{<:Term}, terms2::AbstractArray{<:Term}) = mergesorted(terms1, terms2, compare, combine)
(+)(p1::Polynomial, p2::Polynomial) = Polynomial(jointerms(terms(p1), terms(p2)))
(-)(p1::Polynomial, p2::Polynomial) = Polynomial(jointerms(terms(p1), (-).(terms(p2))))
(==)(::Variable{N}, ::Variable{N}) where {N} = true
(==)(::Variable, ::Variable) = false
(==)(m1::Monomial{V}, m2::Monomial{V}) where {V} = exponents(m1) == exponents(m2)
# Multiplication is handled as a special case so that we can write these
# definitions without resorting to promotion:
MP.multconstant(α, v::Monomial) = Term(α, v)
MP.multconstant(α, v::Variable) = Term(α, Monomial(v))
(*)(v1::V, v2::V) where {V <: Variable} = Monomial{(V(),), 1}((2,))
(*)(v1::Variable, v2::Variable) = (*)(promote(v1, v2)...)
function MP.divides(m1::Monomial{V, N}, m2::Monomial{V, N}) where {V, N}
reduce((d, exp) -> d && (exp[1] <= exp[2]), zip(m1.exponents, m2.exponents), init=true)
end
MP.divides(m1::Monomial, m2::Monomial) = divides(promote(m1, m2)...)
function MP.mapexponents(op, m1::M, m2::M) where M<:Monomial
M(map(op, m1.exponents, m2.exponents))
end
MP.mapexponents(op, m1::Monomial, m2::Monomial) = mapexponents(op, promote(m1, m2)...)
# TODO: this could be faster with an in-place summation
function (*)(p1::Polynomial{S}, p2::Polynomial{T}) where {S, T}
C = Base.promote_op(*, S, T)
M = promote_type(monomialtype(p1), monomialtype(p2))
result = Polynomial(termtype(M, C)[])
for t1 in terms(p1)
for t2 in terms(p2)
result += t1 * t2
end
end
result
end
^(v::V, x::Integer) where {V <: Variable} = Monomial{(V(),), 1}((x,))
# dot(v1::AbstractVector{<:TermLike}, v2::AbstractVector) = dot(v1, v2)
# dot(v1::AbstractVector, v2::AbstractVector{<:TermLike}) = dot(v1, v2)
# dot(v1::AbstractVector{<:TermLike}, v2::AbstractVector{<:TermLike}) = dot(v1, v2)
# All of these types are immutable, so there's no need to copy anything to get
# a shallow copy.
Base.copy(x::TermLike) = x
Base.copy(p::Polynomial) = Polynomial(copy(terms(p)))
adjoint(v::Variable) = v
adjoint(m::Monomial) = m
adjoint(t::Term) = Term(adjoint(coefficient(t)), monomial(t))
adjoint(x::Polynomial) = Polynomial(adjoint.(terms(x)))
|
proofpile-julia0005-42534 | {
"provenance": "014.jsonl.gz:242535"
} | using LinearAlgebra: tril!, triu!
function batched_tril!(A::CuArray{T, N}, d) where {T, N}
if N < 2
error("MethodError: no method matching tril!(::Array{Float64,1})")
elseif N == 2
        return tril!(A, d)
else
s = size(A)
m, n = s[1], s[2]
l = m*n
bs = Int(length(A) // l)
function batch_tril_kernel!(_A, _d)
li = (blockIdx().x - 1) * blockDim().x + threadIdx().x
b = (blockIdx().y - 1) * blockDim().y + threadIdx().y
@inbounds if 0 < li <= l && b <= bs
id = Tuple(CartesianIndices(_A)[Base._to_linear_index(_A, mod1(li, m), fld1(li, m), b)])
i, j = id
if i < j - _d
_A[id...] = 0
end
end
return nothing
end
max_threads = 256
thread_x = min(max_threads, l)
thread_y = min(max_threads ÷ thread_x, bs)
threads = (thread_x, thread_y)
blocks = ceil.(Int, (l, bs) ./ threads)
@cuda threads=threads blocks=blocks batch_tril_kernel!(A, d)
return A
end
end
function batched_triu!(A::CuArray{T, N}, d) where {T, N}
if N < 2
error("MethodError: no method matching triu!(::Array{Float64,1})")
elseif N == 2
        return triu!(A, d)
else
s = size(A)
m, n = s[1], s[2]
l = m*n
bs = Int(length(A) // l)
function batch_triu_kernel!(_A, _d)
li = (blockIdx().x - 1) * blockDim().x + threadIdx().x
b = (blockIdx().y - 1) * blockDim().y + threadIdx().y
@inbounds if 0 < li <= l && b <= bs
id = Tuple(CartesianIndices(_A)[Base._to_linear_index(_A, mod1(li, m), fld1(li, m), b)])
i, j = id
if j < i + _d
_A[id...] = 0
end
end
return nothing
end
max_threads = 256
thread_x = min(max_threads, l)
thread_y = min(max_threads ÷ thread_x, bs)
threads = (thread_x, thread_y)
blocks = ceil.(Int, (l, bs) ./ threads)
@cuda threads=threads blocks=blocks batch_triu_kernel!(A, d)
return A
end
end
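# Minimal usage sketch (assumes a functional CUDA device; names are illustrative):
# A = CUDA.rand(Float32, 4, 4, 8) # a batch of eight 4×4 matrices
# batched_tril!(A, 0)             # keep the lower triangle of every slice
# batched_triu!(A, 0)             # keep the upper triangle of every slice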
|
proofpile-julia0005-42535 | {
"provenance": "014.jsonl.gz:242536"
} | const SMALL_SIGNATURES = get(ENV, "PBC_SMALL_SIGNATURES", "n") == "y"
const SMALL_IDENTITIES = get(ENV, "PBC_SMALL_IDENTITIES", "y") == "y"
const NPROCS = get(ENV, "PBC_NPROCS", "") == "auto" ? Sys.CPU_THREADS : parse(Int, get(ENV, "PBC_NPROCS", "1"))
const BATCH_SIZE = Threads.nthreads() * parse(Int, get(ENV, "PBC_BATCH_SCALE_FACTOR", "1024"))
macro EP()
return SMALL_SIGNATURES ? :(Curve.EP) : :(Curve.EP2)
end
macro EP2()
return SMALL_SIGNATURES ? :(Curve.EP2) : :(Curve.EP)
end
macro ID()
return SMALL_IDENTITIES ? :(Int64) : :(Int128)
end
const G1 = Curve.curve_gen(@EP)
const G2 = Curve.curve_gen(@EP2)
const PUBLIC_KEY_SIZE = sizeof(G2.x)
const SIGNATURE_SIZE = sizeof(G1.x)
const PRIME = Curve.fp_prime_get()
const ORDER = Curve.curve_order(@EP)
# Effective number of bits required to store the private key
const PRIVATE_KEY_SIZE_BITS = ceil(Int, log2(BigInt(ORDER)))
# Effective number of bytes required to store the private key
const PRIVATE_KEY_SIZE = ceil(Int, PRIVATE_KEY_SIZE_BITS // 8)
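# Illustrative (hypothetical numbers): for a 255-bit group order,
# PRIVATE_KEY_SIZE_BITS == 255 and PRIVATE_KEY_SIZE == ceil(255 / 8) == 32 bytes.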
# whether or not we have bits over for encoding LSB(y)
const CAN_STUFF_Y = ceil(Int, log2(BigInt(PRIME))) < (8 * Curve.FP_ST_SIZE)
|
proofpile-julia0005-42536 | {
"provenance": "014.jsonl.gz:242537"
} | int_rules_1_1_1_2 = @theory begin
#= ::Subsection::Closed:: =#
#= 1.1.1.2 (a+b*x)^m*(c+d*x)^n =#
@apply_utils Antiderivative((~a + ~(b') * ~x) ^ ~(m') * (~c + ~(d') * ~x), ~x) => (~d * ~x * (~a + ~b * ~x) ^ (~m + 1)) / (~b * (~m + 2)) <-- FreeQ([~a, ~b, ~c, ~d, ~m], ~x) && EqQ(~a * ~d - ~b * ~c * (~m + 2), 0)
@apply_utils Antiderivative(1 / ((~a + ~(b') * ~x) * (~c + ~(d') * ~x)), ~x) => Antiderivative(1 / (~a * ~c + ~b * ~d * (~x) ^ 2), ~x) <-- FreeQ([~a, ~b, ~c, ~d], ~x) && EqQ(~b * ~c + ~a * ~d, 0)
@apply_utils Antiderivative(1 / ((~(a') + ~(b') * ~x) * (~(c') + ~(d') * ~x)), ~x) => (~b / (~b * ~c - ~a * ~d)) * Antiderivative(1 / (~a + ~b * ~x), ~x) - (~d / (~b * ~c - ~a * ~d)) * Antiderivative(1 / (~c + ~d * ~x), ~x) <-- FreeQ([~a, ~b, ~c, ~d], ~x) && NeQ(~b * ~c - ~a * ~d, 0)
@apply_utils Antiderivative((~(a') + ~(b') * ~x) ^ ~(m') * (~(c') + ~(d') * ~x) ^ ~n, ~x) => ((~a + ~b * ~x) ^ (~m + 1) * (~c + ~d * ~x) ^ (~n + 1)) / ((~b * ~c - ~a * ~d) * (~m + 1)) <-- FreeQ([~a, ~b, ~c, ~d, ~m, ~n], ~x) && (NeQ(~b * ~c - ~a * ~d, 0) && (EqQ(~m + ~n + 2, 0) && NeQ(~m, -1)))
@apply_utils Antiderivative((~a + ~(b') * ~x) ^ ~m * (~c + ~(d') * ~x) ^ ~m, ~x) => (~x * (~a + ~b * ~x) ^ ~m * (~c + ~d * ~x) ^ ~m) / (2 * ~m + 1) + ((2 * ~a * ~c * ~m) / (2 * ~m + 1)) * Antiderivative((~a + ~b * ~x) ^ (~m - 1) * (~c + ~d * ~x) ^ (~m - 1), ~x) <-- FreeQ([~a, ~b, ~c, ~d], ~x) && (EqQ(~b * ~c + ~a * ~d, 0) && IGtQ(~m + 1 / 2, 0))
@apply_utils Antiderivative(1 / ((~a + ~(b') * ~x) ^ (3 / 2) * (~c + ~(d') * ~x) ^ (3 / 2)), ~x) => ~x / (~a * ~c * sqrt(~a + ~b * ~x) * sqrt(~c + ~d * ~x)) <-- FreeQ([~a, ~b, ~c, ~d], ~x) && EqQ(~b * ~c + ~a * ~d, 0)
@apply_utils Antiderivative((~a + ~(b') * ~x) ^ ~m * (~c + ~(d') * ~x) ^ ~m, ~x) => (-(~x) * (~a + ~b * ~x) ^ (~m + 1) * (~c + ~d * ~x) ^ (~m + 1)) / (2 * ~a * ~c * (~m + 1)) + ((2 * ~m + 3) / (2 * ~a * ~c * (~m + 1))) * Antiderivative((~a + ~b * ~x) ^ (~m + 1) * (~c + ~d * ~x) ^ (~m + 1), ~x) <-- FreeQ([~a, ~b, ~c, ~d], ~x) && (EqQ(~b * ~c + ~a * ~d, 0) && ILtQ(~m + 3 / 2, 0))
@apply_utils Antiderivative((~a + ~(b') * ~x) ^ ~(m') * (~c + ~(d') * ~x) ^ ~(m'), ~x) => Antiderivative((~a * ~c + ~b * ~d * (~x) ^ 2) ^ ~m, ~x) <-- FreeQ([~a, ~b, ~c, ~d, ~m], ~x) && (EqQ(~b * ~c + ~a * ~d, 0) && (IntegerQ(~m) || GtQ(~a, 0) && GtQ(~c, 0)))
@apply_utils Antiderivative((~a + ~(b') * ~x) ^ ~m * (~c + ~(d') * ~x) ^ ~m, ~x) => (((~a + ~b * ~x) ^ FracPart(~m) * (~c + ~d * ~x) ^ FracPart(~m)) / (~a * ~c + ~b * ~d * (~x) ^ 2) ^ FracPart(~m)) * Antiderivative((~a * ~c + ~b * ~d * (~x) ^ 2) ^ ~m, ~x) <-- FreeQ([~a, ~b, ~c, ~d, ~m], ~x) && (EqQ(~b * ~c + ~a * ~d, 0) && Not(IntegerQ(2 * ~m)))
@apply_utils Antiderivative((~(a') + ~(b') * ~x) ^ ~m * (~(c') + ~(d') * ~x) ^ ~n, ~x) => ((~a + ~b * ~x) ^ (~m + 1) * (~c + ~d * ~x) ^ ~n) / (~b * (~m + 1)) - ((~d * ~n) / (~b * (~m + 1))) * Antiderivative((~a + ~b * ~x) ^ (~m + 1) * (~c + ~d * ~x) ^ (~n - 1), ~x) <-- FreeQ([~a, ~b, ~c, ~d, ~n], ~x) && (NeQ(~b * ~c - ~a * ~d, 0) && (ILtQ(~m, -1) && (Not(IntegerQ(~n)) && GtQ(~n, 0))))
@apply_utils Antiderivative((~(a') + ~(b') * ~x) ^ ~m * (~(c') + ~(d') * ~x) ^ ~n, ~x) => ((~a + ~b * ~x) ^ (~m + 1) * (~c + ~d * ~x) ^ (~n + 1)) / ((~b * ~c - ~a * ~d) * (~m + 1)) - ((~d * (~m + ~n + 2)) / ((~b * ~c - ~a * ~d) * (~m + 1))) * Antiderivative((~a + ~b * ~x) ^ (~m + 1) * (~c + ~d * ~x) ^ ~n, ~x) <-- FreeQ([~a, ~b, ~c, ~d, ~n], ~x) && (NeQ(~b * ~c - ~a * ~d, 0) && (ILtQ(~m, -1) && (Not(IntegerQ(~n)) && LtQ(~n, 0))))
@apply_utils Antiderivative((~(a') + ~(b') * ~x) ^ ~(m') * (~(c') + ~(d') * ~x) ^ ~(n'), ~x) => Antiderivative(ExpandIntegrand((~a + ~b * ~x) ^ ~m * (~c + ~d * ~x) ^ ~n, ~x), ~x) <-- FreeQ([~a, ~b, ~c, ~d, ~n], ~x) && (NeQ(~b * ~c - ~a * ~d, 0) && (IGtQ(~m, 0) && (Not(IntegerQ(~n)) || (EqQ(~c, 0) && LeQ(7 * ~m + 4 * ~n + 4, 0) || (LtQ(9 * ~m + 5 * (~n + 1), 0) || GtQ(~m + ~n + 2, 0))))))
@apply_utils Antiderivative((~a + ~(b') * ~x) ^ ~m * (~(c') + ~(d') * ~x) ^ ~(n'), ~x) => Antiderivative(ExpandIntegrand((~a + ~b * ~x) ^ ~m * (~c + ~d * ~x) ^ ~n, ~x), ~x) <-- FreeQ([~a, ~b, ~c, ~d], ~x) && (NeQ(~b * ~c - ~a * ~d, 0) && (ILtQ(~m, 0) && (IntegerQ(~n) && Not(IGtQ(~n, 0) && LtQ(~m + ~n + 2, 0)))))
@apply_utils Antiderivative((~(a') + ~(b') * ~x) ^ ~m * (~(c') + ~(d') * ~x) ^ ~n, ~x) => ((~a + ~b * ~x) ^ (~m + 1) * (~c + ~d * ~x) ^ (~n + 1)) / ((~b * ~c - ~a * ~d) * (~m + 1)) - ((~d * Simplify(~m + ~n + 2)) / ((~b * ~c - ~a * ~d) * (~m + 1))) * Antiderivative((~a + ~b * ~x) ^ Simplify(~m + 1) * (~c + ~d * ~x) ^ ~n, ~x) <-- FreeQ([~a, ~b, ~c, ~d, ~m, ~n], ~x) && (NeQ(~b * ~c - ~a * ~d, 0) && (ILtQ(Simplify(~m + ~n + 2), 0) && (NeQ(~m, -1) && (Not(LtQ(~m, -1) && (LtQ(~n, -1) && (EqQ(~a, 0) || NeQ(~c, 0) && (LtQ(~m - ~n, 0) && IntegerQ(~n))))) && (SumSimplerQ(~m, 1) || Not(SumSimplerQ(~n, 1)))))))
@apply_utils Antiderivative(1 / ((~a + ~(b') * ~x) ^ (9 / 4) * (~c + ~(d') * ~x) ^ (1 / 4)), ~x) => -4 / (5 * ~b * (~a + ~b * ~x) ^ (5 / 4) * (~c + ~d * ~x) ^ (1 / 4)) - (~d / (5 * ~b)) * Antiderivative(1 / ((~a + ~b * ~x) ^ (5 / 4) * (~c + ~d * ~x) ^ (5 / 4)), ~x) <-- FreeQ([~a, ~b, ~c, ~d], ~x) && (EqQ(~b * ~c + ~a * ~d, 0) && NegQ((~a) ^ 2 * (~b) ^ 2))
@apply_utils Antiderivative((~(a') + ~(b') * ~x) ^ ~m * (~(c') + ~(d') * ~x) ^ ~n, ~x) => ((~a + ~b * ~x) ^ (~m + 1) * (~c + ~d * ~x) ^ ~n) / (~b * (~m + 1)) - ((~d * ~n) / (~b * (~m + 1))) * Antiderivative((~a + ~b * ~x) ^ (~m + 1) * (~c + ~d * ~x) ^ (~n - 1), ~x) <-- FreeQ([~a, ~b, ~c, ~d], ~x) && (NeQ(~b * ~c - ~a * ~d, 0) && (GtQ(~n, 0) && (LtQ(~m, -1) && (Not(IntegerQ(~n) && Not(IntegerQ(~m))) && (Not(ILeQ(~m + ~n + 2, 0) && (FractionQ(~m) || GeQ(2 * ~n + ~m + 1, 0))) && IntLinearQ(~a, ~b, ~c, ~d, ~m, ~n, ~x))))))
@apply_utils Antiderivative(1 / ((~a + ~(b') * ~x) ^ (5 / 4) * (~c + ~(d') * ~x) ^ (1 / 4)), ~x) => -2 / (~b * (~a + ~b * ~x) ^ (1 / 4) * (~c + ~d * ~x) ^ (1 / 4)) + ~c * Antiderivative(1 / ((~a + ~b * ~x) ^ (5 / 4) * (~c + ~d * ~x) ^ (5 / 4)), ~x) <-- FreeQ([~a, ~b, ~c, ~d], ~x) && (EqQ(~b * ~c + ~a * ~d, 0) && NegQ((~a) ^ 2 * (~b) ^ 2))
@apply_utils Antiderivative((~a + ~(b') * ~x) ^ ~m * (~c + ~(d') * ~x) ^ ~n, ~x) => ((~a + ~b * ~x) ^ (~m + 1) * (~c + ~d * ~x) ^ ~n) / (~b * (~m + ~n + 1)) + ((2 * ~c * ~n) / (~m + ~n + 1)) * Antiderivative((~a + ~b * ~x) ^ ~m * (~c + ~d * ~x) ^ (~n - 1), ~x) <-- FreeQ([~a, ~b, ~c, ~d], ~x) && (EqQ(~b * ~c + ~a * ~d, 0) && (IGtQ(~m + 1 / 2, 0) && (IGtQ(~n + 1 / 2, 0) && LtQ(~m, ~n))))
@apply_utils Antiderivative((~(a') + ~(b') * ~x) ^ ~m * (~(c') + ~(d') * ~x) ^ ~n, ~x) => ((~a + ~b * ~x) ^ (~m + 1) * (~c + ~d * ~x) ^ ~n) / (~b * (~m + ~n + 1)) + ((~n * (~b * ~c - ~a * ~d)) / (~b * (~m + ~n + 1))) * Antiderivative((~a + ~b * ~x) ^ ~m * (~c + ~d * ~x) ^ (~n - 1), ~x) <-- FreeQ([~a, ~b, ~c, ~d], ~x) && (NeQ(~b * ~c - ~a * ~d, 0) && (GtQ(~n, 0) && (NeQ(~m + ~n + 1, 0) && (Not(IGtQ(~m, 0) && (Not(IntegerQ(~n)) || GtQ(~m, 0) && LtQ(~m - ~n, 0))) && (Not(ILtQ(~m + ~n + 2, 0)) && IntLinearQ(~a, ~b, ~c, ~d, ~m, ~n, ~x))))))
@apply_utils Antiderivative((~(a') + ~(b') * ~x) ^ ~m * (~(c') + ~(d') * ~x) ^ ~n, ~x) => ((~a + ~b * ~x) ^ (~m + 1) * (~c + ~d * ~x) ^ (~n + 1)) / ((~b * ~c - ~a * ~d) * (~m + 1)) - ((~d * (~m + ~n + 2)) / ((~b * ~c - ~a * ~d) * (~m + 1))) * Antiderivative((~a + ~b * ~x) ^ (~m + 1) * (~c + ~d * ~x) ^ ~n, ~x) <-- FreeQ([~a, ~b, ~c, ~d, ~n], ~x) && (NeQ(~b * ~c - ~a * ~d, 0) && (LtQ(~m, -1) && (Not(LtQ(~n, -1) && (EqQ(~a, 0) || NeQ(~c, 0) && (LtQ(~m - ~n, 0) && IntegerQ(~n)))) && IntLinearQ(~a, ~b, ~c, ~d, ~m, ~n, ~x))))
@apply_utils Antiderivative(1 / (sqrt(~a + ~(b') * ~x) * sqrt(~c + ~(d') * ~x)), ~x) => acosh((~b * ~x) / ~a) / ~b <-- FreeQ([~a, ~b, ~c, ~d], ~x) && (EqQ(~a + ~c, 0) && (EqQ(~b - ~d, 0) && GtQ(~a, 0)))
@apply_utils Antiderivative(1 / (sqrt(~a + ~(b') * ~x) * sqrt(~(c') + ~(d') * ~x)), ~x) => Antiderivative(1 / sqrt((~a * ~c - ~b * (~a - ~c) * ~x) - (~b) ^ 2 * (~x) ^ 2), ~x) <-- FreeQ([~a, ~b, ~c, ~d], ~x) && (EqQ(~b + ~d, 0) && GtQ(~a + ~c, 0))
@apply_utils Antiderivative(1 / (sqrt(~(a') + ~(b') * ~x) * sqrt(~(c') + ~(d') * ~x)), ~x) => (2 / sqrt(~b)) * Subst(Antiderivative(1 / sqrt((~b * ~c - ~a * ~d) + ~d * (~x) ^ 2), ~x), ~x, sqrt(~a + ~b * ~x)) <-- FreeQ([~a, ~b, ~c, ~d], ~x) && (GtQ(~b * ~c - ~a * ~d, 0) && GtQ(~b, 0))
@apply_utils Antiderivative(1 / ((~(a') + ~(b') * ~x) * (~(c') + ~(d') * ~x) ^ (1 / 3)), ~x) => With([q = Rt((~b * ~c - ~a * ~d) / ~b, 3)], (-(log(RemoveContent(~a + ~b * ~x, ~x))) / (2 * ~b * q) - (3 / (2 * ~b * q)) * Subst(Antiderivative(1 / (q - ~x), ~x), ~x, (~c + ~d * ~x) ^ (1 / 3))) + (3 / (2 * ~b)) * Subst(Antiderivative(1 / (q ^ 2 + q * ~x + (~x) ^ 2), ~x), ~x, (~c + ~d * ~x) ^ (1 / 3))) <-- FreeQ([~a, ~b, ~c, ~d], ~x) && PosQ((~b * ~c - ~a * ~d) / ~b)
@apply_utils Antiderivative(1 / ((~(a') + ~(b') * ~x) * (~(c') + ~(d') * ~x) ^ (1 / 3)), ~x) => With([q = Rt(-((~b * ~c - ~a * ~d)) / ~b, 3)], (log(RemoveContent(~a + ~b * ~x, ~x)) / (2 * ~b * q) - (3 / (2 * ~b * q)) * Subst(Antiderivative(1 / (q + ~x), ~x), ~x, (~c + ~d * ~x) ^ (1 / 3))) + (3 / (2 * ~b)) * Subst(Antiderivative(1 / ((q ^ 2 - q * ~x) + (~x) ^ 2), ~x), ~x, (~c + ~d * ~x) ^ (1 / 3))) <-- FreeQ([~a, ~b, ~c, ~d], ~x) && NegQ((~b * ~c - ~a * ~d) / ~b)
@apply_utils Antiderivative(1 / ((~(a') + ~(b') * ~x) * (~(c') + ~(d') * ~x) ^ (2 / 3)), ~x) => With([q = Rt((~b * ~c - ~a * ~d) / ~b, 3)], (-(log(RemoveContent(~a + ~b * ~x, ~x))) / (2 * ~b * q ^ 2) - (3 / (2 * ~b * q ^ 2)) * Subst(Antiderivative(1 / (q - ~x), ~x), ~x, (~c + ~d * ~x) ^ (1 / 3))) - (3 / (2 * ~b * q)) * Subst(Antiderivative(1 / (q ^ 2 + q * ~x + (~x) ^ 2), ~x), ~x, (~c + ~d * ~x) ^ (1 / 3))) <-- FreeQ([~a, ~b, ~c, ~d], ~x) && PosQ((~b * ~c - ~a * ~d) / ~b)
@apply_utils Antiderivative(1 / ((~(a') + ~(b') * ~x) * (~(c') + ~(d') * ~x) ^ (2 / 3)), ~x) => With([q = Rt(-((~b * ~c - ~a * ~d)) / ~b, 3)], -(log(RemoveContent(~a + ~b * ~x, ~x))) / (2 * ~b * q ^ 2) + (3 / (2 * ~b * q ^ 2)) * Subst(Antiderivative(1 / (q + ~x), ~x), ~x, (~c + ~d * ~x) ^ (1 / 3)) + (3 / (2 * ~b * q)) * Subst(Antiderivative(1 / ((q ^ 2 - q * ~x) + (~x) ^ 2), ~x), ~x, (~c + ~d * ~x) ^ (1 / 3))) <-- FreeQ([~a, ~b, ~c, ~d], ~x) && NegQ((~b * ~c - ~a * ~d) / ~b)
@apply_utils Antiderivative(1 / ((~(a') + ~(b') * ~x) ^ (1 / 3) * (~(c') + ~(d') * ~x) ^ (2 / 3)), ~x) => With([q = Rt(~d / ~b, 3)], (((-(sqrt(3)) * q) / ~d) * atan((2 * q * (~a + ~b * ~x) ^ (1 / 3)) / (sqrt(3) * (~c + ~d * ~x) ^ (1 / 3)) + 1 / sqrt(3)) - (q / (2 * ~d)) * log(~c + ~d * ~x)) - ((3q) / (2 * ~d)) * log((q * (~a + ~b * ~x) ^ (1 / 3)) / (~c + ~d * ~x) ^ (1 / 3) - 1)) <-- FreeQ([~a, ~b, ~c, ~d], ~x) && (NeQ(~b * ~c - ~a * ~d, 0) && PosQ(~d / ~b))
@apply_utils Antiderivative(1 / ((~(a') + ~(b') * ~x) ^ (1 / 3) * (~(c') + ~(d') * ~x) ^ (2 / 3)), ~x) => With([q = Rt(-(~d) / ~b, 3)], ((sqrt(3) * q) / ~d) * atan(1 / sqrt(3) - (2 * q * (~a + ~b * ~x) ^ (1 / 3)) / (sqrt(3) * (~c + ~d * ~x) ^ (1 / 3))) + (q / (2 * ~d)) * log(~c + ~d * ~x) + ((3q) / (2 * ~d)) * log((q * (~a + ~b * ~x) ^ (1 / 3)) / (~c + ~d * ~x) ^ (1 / 3) + 1)) <-- FreeQ([~a, ~b, ~c, ~d], ~x) && (NeQ(~b * ~c - ~a * ~d, 0) && NegQ(~d / ~b))
@apply_utils Antiderivative((~(a') + ~(b') * ~x) ^ ~m * (~c + ~(d') * ~x) ^ ~m, ~x) => (((~a + ~b * ~x) ^ ~m * (~c + ~d * ~x) ^ ~m) / (~a * ~c + (~b * ~c + ~a * ~d) * ~x + ~b * ~d * (~x) ^ 2) ^ ~m) * Antiderivative((~a * ~c + (~b * ~c + ~a * ~d) * ~x + ~b * ~d * (~x) ^ 2) ^ ~m, ~x) <-- FreeQ([~a, ~b, ~c, ~d], ~x) && (NeQ(~b * ~c - ~a * ~d, 0) && (LtQ(-1, ~m, 0) && (LeQ(3, Denominator(~m), 4) && AtomQ(~b * ~c + ~a * ~d))))
@apply_utils Antiderivative((~(a') + ~(b') * ~x) ^ ~m * (~c + ~(d') * ~x) ^ ~m, ~x) => (((~a + ~b * ~x) ^ ~m * (~c + ~d * ~x) ^ ~m) / ((~a + ~b * ~x) * (~c + ~d * ~x)) ^ ~m) * Antiderivative((~a * ~c + (~b * ~c + ~a * ~d) * ~x + ~b * ~d * (~x) ^ 2) ^ ~m, ~x) <-- FreeQ([~a, ~b, ~c, ~d], ~x) && (NeQ(~b * ~c - ~a * ~d, 0) && (LtQ(-1, ~m, 0) && LeQ(3, Denominator(~m), 4)))
@apply_utils Antiderivative((~(a') + ~(b') * ~x) ^ ~m * (~(c') + ~(d') * ~x) ^ ~n, ~x) => With([p = Denominator(~m)], (p / ~b) * Subst(Antiderivative((~x) ^ (p * (~m + 1) - 1) * ((~c - (~a * ~d) / ~b) + (~d * (~x) ^ p) / ~b) ^ ~n, ~x), ~x, (~a + ~b * ~x) ^ (1 / p))) <-- FreeQ([~a, ~b, ~c, ~d], ~x) && (NeQ(~b * ~c - ~a * ~d, 0) && (LtQ(-1, ~m, 0) && (LeQ(-1, ~n, 0) && (LeQ(Denominator(~n), Denominator(~m)) && IntLinearQ(~a, ~b, ~c, ~d, ~m, ~n, ~x)))))
@apply_utils Antiderivative((~(b') * ~x) ^ ~m * (~c + ~(d') * ~x) ^ ~n, ~x) => (((~c) ^ ~n * (~b * ~x) ^ (~m + 1)) / (~b * (~m + 1))) * HypergeometricFunctions._₂F₁(-(~n), ~m + 1, ~m + 2, (-(~d) * ~x) / ~c) <-- FreeQ([~b, ~c, ~d, ~m, ~n], ~x) && (Not(IntegerQ(~m)) && (IntegerQ(~n) || GtQ(~c, 0) && Not(EqQ(~n, -1 / 2) && (EqQ((~c) ^ 2 - (~d) ^ 2, 0) && GtQ(-(~d) / (~b * ~c), 0)))))
@apply_utils Antiderivative((~(b') * ~x) ^ ~m * (~c + ~(d') * ~x) ^ ~n, ~x) => ((~c + ~d * ~x) ^ (~n + 1) / (~d * (~n + 1) * (-(~d) / (~b * ~c)) ^ ~m)) * HypergeometricFunctions._₂F₁(-(~m), ~n + 1, ~n + 2, 1 + (~d * ~x) / ~c) <-- FreeQ([~b, ~c, ~d, ~m, ~n], ~x) && (Not(IntegerQ(~n)) && (IntegerQ(~m) || GtQ(-(~d) / (~b * ~c), 0)))
@apply_utils Antiderivative((~(b') * ~x) ^ ~m * (~c + ~(d') * ~x) ^ ~n, ~x) => (((~c) ^ IntPart(~n) * (~c + ~d * ~x) ^ FracPart(~n)) / (1 + (~d * ~x) / ~c) ^ FracPart(~n)) * Antiderivative((~b * ~x) ^ ~m * (1 + (~d * ~x) / ~c) ^ ~n, ~x) <-- FreeQ([~b, ~c, ~d, ~m, ~n], ~x) && (Not(IntegerQ(~m)) && (Not(IntegerQ(~n)) && (Not(GtQ(~c, 0)) && (Not(GtQ(-(~d) / (~b * ~c), 0)) && (RationalQ(~m) && Not(EqQ(~n, -1 / 2) && EqQ((~c) ^ 2 - (~d) ^ 2, 0)) || Not(RationalQ(~n)))))))
@apply_utils Antiderivative((~(b') * ~x) ^ ~m * (~c + ~(d') * ~x) ^ ~n, ~x) => ((((-(~b) * ~c) / ~d) ^ IntPart(~m) * (~b * ~x) ^ FracPart(~m)) / ((-(~d) * ~x) / ~c) ^ FracPart(~m)) * Antiderivative(((-(~d) * ~x) / ~c) ^ ~m * (~c + ~d * ~x) ^ ~n, ~x) <-- FreeQ([~b, ~c, ~d, ~m, ~n], ~x) && (Not(IntegerQ(~m)) && (Not(IntegerQ(~n)) && (Not(GtQ(~c, 0)) && Not(GtQ(-(~d) / (~b * ~c), 0)))))
@apply_utils Antiderivative((~a + ~(b') * ~x) ^ ~m * (~c + ~(d') * ~x) ^ ~n, ~x) => (((~b * ~c - ~a * ~d) ^ ~n * (~a + ~b * ~x) ^ (~m + 1)) / ((~b) ^ (~n + 1) * (~m + 1))) * HypergeometricFunctions._₂F₁(-(~n), ~m + 1, ~m + 2, (-(~d) * (~a + ~b * ~x)) / (~b * ~c - ~a * ~d)) <-- FreeQ([~a, ~b, ~c, ~d, ~m], ~x) && (NeQ(~b * ~c - ~a * ~d, 0) && (Not(IntegerQ(~m)) && IntegerQ(~n)))
@apply_utils Antiderivative((~a + ~(b') * ~x) ^ ~m * (~c + ~(d') * ~x) ^ ~n, ~x) => ((~a + ~b * ~x) ^ (~m + 1) / (~b * (~m + 1) * (~b / (~b * ~c - ~a * ~d)) ^ ~n)) * HypergeometricFunctions._₂F₁(-(~n), ~m + 1, ~m + 2, (-(~d) * (~a + ~b * ~x)) / (~b * ~c - ~a * ~d)) <-- FreeQ([~a, ~b, ~c, ~d, ~m, ~n], ~x) && (NeQ(~b * ~c - ~a * ~d, 0) && (Not(IntegerQ(~m)) && (Not(IntegerQ(~n)) && (GtQ(~b / (~b * ~c - ~a * ~d), 0) && (RationalQ(~m) || Not(RationalQ(~n) && GtQ(-(~d) / (~b * ~c - ~a * ~d), 0)))))))
@apply_utils Antiderivative((~a + ~(b') * ~x) ^ ~m * (~c + ~(d') * ~x) ^ ~n, ~x) => ((~c + ~d * ~x) ^ FracPart(~n) / ((~b / (~b * ~c - ~a * ~d)) ^ IntPart(~n) * ((~b * (~c + ~d * ~x)) / (~b * ~c - ~a * ~d)) ^ FracPart(~n))) * Antiderivative((~a + ~b * ~x) ^ ~m * Simp((~b * ~c) / (~b * ~c - ~a * ~d) + (~b * ~d * ~x) / (~b * ~c - ~a * ~d), ~x) ^ ~n, ~x) <-- FreeQ([~a, ~b, ~c, ~d, ~m, ~n], ~x) && (NeQ(~b * ~c - ~a * ~d, 0) && (Not(IntegerQ(~m)) && (Not(IntegerQ(~n)) && (RationalQ(~m) || Not(SimplerQ(~n + 1, ~m + 1))))))
@apply_utils Antiderivative((~(a') + ~(b') * ~u) ^ ~(m') * (~(c') + ~(d') * ~u) ^ ~(n'), ~x) => (1 / Coefficient(~u, ~x, 1)) * Subst(Antiderivative((~a + ~b * ~x) ^ ~m * (~c + ~d * ~x) ^ ~n, ~x), ~x, ~u) <-- FreeQ([~a, ~b, ~c, ~d, ~m, ~n], ~x) && (LinearQ(~u, ~x) && NeQ(Coefficient(~u, ~x, 0), 0))
#= IntLinearQ(a,b,c,d,m,n,x) returns True iff (a+b*x)^m*(c+d*x)^n is integrable wrt x in terms of non-hypergeometric functions. =#
IntLinearQ((~a), (~b), (~c), (~d), (~m), (~n), (~x)) := IGtQ(m, 0) || IGtQ(n, 0) || IntegersQ(3*m, 3*n) || IntegersQ(4*m, 4*n) || IntegersQ(2*m, 6*n) || IntegersQ(6*m, 2*n) || ILtQ(m + n, -1) || IntegerQ(m + n) && RationalQ(m)
end
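# Illustrative reading of the first rule above (not executable on its own):
# ∫ (a + b*x)^m * (c + d*x) dx == d*x*(a + b*x)^(m + 1) / (b*(m + 2))
# provided a, b, c, d, m are free of x and a*d - b*c*(m + 2) == 0.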
|
proofpile-julia0005-42537 | {
"provenance": "014.jsonl.gz:242538"
} | using DataFrames: DataFrame, DataFrame!
using CSV: File
using DataStructures
using Parameters
using Convex, SCS
using Plots
using MathOptInterface
const MOI = MathOptInterface
using EllipsisNotation
#=
Notation:
pᵢ - inflow proportions
CFᵢ - cumulative inflows
CFₒ - cumulative outflows
ECᵢ - expected costs of vehicles coming in
ECₒ - expected costs of vehicles going out
DTC - departure time choices
SR - splitting rates
=#
ROUND_DIGITS = 13
include("types.jl")
include("crange.jl")
include("utils.jl")
include("models.jl")
ugap, bgap = 3, 4 # used in computing sending and receiving functions
u, w = 1/ugap, 1/bgap
Q, N, δ = 24.0, 80, w/u
demand_level, T, Tm = 1800, 132*ugap, 80*ugap
M = 10 * T
α, β, γ, trgt = 1., 0.5, 2., 60*ugap
linkdata = DataFrame!(File("nguyen.csv"; delim=", "))
linkdata.l *= ugap;
net = Network(linkdata);
nclasses, nsinks, nlinks = 1, 2, numlinks(net);
srcs, snks, mrgs, divs = sources(net), sinks(net), merges(net), diverges(net);
trips = ones(length(srcs), length(snks), nclasses) .* demand_level;
include("initchoices.jl");
include("simulation.jl");
include("costcomputation.jl");
include("equilibrium.jl");
DTC, SR = initchoices();
# i = 5; DTC, SR = DTC_list[i], SR_list[i]; # optional: resume from iterate i of a previous run (requires DTC_list/SR_list from below)
CFᵢ, CFₒ, states = simulate();
ECᵢ, ECₒ, rtracs = computecosts();
relgap(ECᵢ, DTC)
DTC, SR = updatechoices!()
relgap(ECᵢ, DTC)
DTC_list = [DTC]
SR_list = [SR]
for i in 1:30
global DTC, SR, CFᵢ, CFₒ, ECᵢ, ECₒ
#@show i
CFᵢ, CFₒ, states = simulate()#(DTC, SR)
ECᵢ, ECₒ, rtracs = computecosts()#(states)
@show relgap(ECᵢ, DTC)
#@assert approxpositive(Fᵢ) # all((Fᵢ .>= 0.) .| (Fᵢ .≈ 0.))
#@assert approxpositive(Fₒ) # all(Fₒ .>= 0.)
@assert all(ECᵢ .>= 0.)
@assert all(ECₒ .>= 0.)
newDTC, newSR = updatechoices!() #(ECᵢ, DTC, SR)
push!(DTC_list, DTC)
push!(SR_list, SR)
DTC, SR = newDTC, newSR
end
|
proofpile-julia0005-42538 | {
"provenance": "014.jsonl.gz:242539"
} | using MatrixCompletion
@testset "$(format("Exponential Family: forward_map[poisson]"))" begin
let
tc1 = rand(100)
@test check(:l2diff, forward_map(Poisson(),tc1),exp.(tc1),0)
@test check(:l2diff, forward_map(:Poisson,tc1),exp.(tc1),0)
tc2 = rand(100,100)
@test check(:l2diff, forward_map(Poisson(),tc2),exp.(tc2),0)
@test check(:l2diff, forward_map(:Poisson,tc2),exp.(tc2),0)
end
end
@testset "$(format("Exponential Family: forward_map[gamma]"))" begin
let
tc1 = rand(100)
@test check(:l2diff, forward_map(Gamma(),tc1),1 ./ tc1,0)
@test check(:l2diff, forward_map(:Gamma,tc1),1 ./ tc1,0)
tc2 = rand(100,100)
@test check(:l2diff, forward_map(Gamma(),tc2),1 ./ tc2,0)
@test check(:l2diff, forward_map(:Gamma,tc2),1 ./ tc2,0)
end
end
@testset "$(format("Exponential Family: forward_map[gaussian]"))" begin
let
tc1 = rand(100)
@test check(:l2diff, forward_map(Gaussian(),tc1),tc1,0)
@test check(:l2diff, forward_map(:Gaussian,tc1), tc1,0)
tc2 = rand(100,100)
@test check(:l2diff, forward_map(Gaussian(),tc2),tc2,0)
@test check(:l2diff, forward_map(:Gaussian,tc2),tc2,0)
end
end
@testset "$(format("Exponential Family: forward_map[bernoulli]"))" begin
let
logit = (x) -> log.(x./(1 .- x))
tc1 = rand(100)
tc1_logit = logit(tc1)
@test check(:l2diff, forward_map(Bernoulli(),tc1_logit),tc1,0)
@test check(:l2diff, forward_map(:Bernoulli,tc1_logit), tc1,0)
tc2 = rand(100,100)
tc2_logit = logit(tc2)
@test check(:l2diff, forward_map(Bernoulli(),tc2_logit),tc2,0)
@test check(:l2diff, forward_map(:Bernoulli,tc2_logit), tc2,0)
end
end
@testset "$(format("Exponential Family: predict[poisson]"))" begin
let
tc1 = rand(2:20,100)
log_tc1 = log.(tc1)
@test check(:l2diff, predict(Poisson(),forward_map(Poisson(),log_tc1)),tc1,0.0)
@test check(:l2diff, predict(:Poisson,forward_map(:Poisson,log_tc1)),tc1,0.0)
@test predict(:Poisson,[1,1];custom_prediction_function=1) == -1 #
tc2 = rand(2:20,100,100)
log_tc2 = log.(tc2)
@test check(:l2diff, predict(Poisson(),forward_map(Poisson(),log_tc2)),tc2,0.0)
@test check(:l2diff, predict(:Poisson,forward_map(:Poisson,log_tc2)),tc2,0.0)
@test predict(:Poisson,[1 1;1 1];custom_prediction_function=1) == -1 #
end
end
@testset "$(format("Exponential Family: predict[bernoulli]"))" begin
let
logit = (x) -> log.(x./(1 .- x))
tc1 = rand(100)
tc1_int = Int.(tc1 .> 0.5)
logit_tc1 = logit(tc1)
@test check(:l2diff, predict(Bernoulli(), forward_map(Bernoulli(),logit_tc1)), tc1_int, 0.0)
@test check(:l2diff, predict(:Bernoulli, forward_map(:Bernoulli, logit_tc1)), tc1_int, 0.0)
@test predict(:Bernoulli,[1,1];custom_prediction_function=1) == -1 #
tc2 = rand(100,100)
tc2_int = Int.(tc2 .> 0.5)
logit_tc2 = logit(tc2)
@test check(:l2diff, predict(Bernoulli(),forward_map(Bernoulli(),logit_tc2)),tc2_int,0.0)
@test check(:l2diff, predict(:Bernoulli,forward_map(:Bernoulli,logit_tc2)),tc2_int,0.0)
@test predict(:Bernoulli,[1 1;1 1];custom_prediction_function=1) == -1 #
end
end
@testset "$(format("Exponential Family: predict[gaussian]"))" begin
let
tc1 = rand(100)
@test check(:l2diff, predict(Gaussian(),forward_map(Gaussian(),tc1)),tc1,0.0)
@test check(:l2diff, predict(:Gaussian,forward_map(:Gaussian,tc1)),tc1,0.0)
@test predict(:Gaussian,[1,1];custom_prediction_function=1) == -1 #
tc2 = rand(100,100)
@test check(:l2diff, predict(Gaussian(),forward_map(Gaussian(),tc2)),tc2,0.0)
@test check(:l2diff, predict(:Gaussian,forward_map(:Gaussian,tc2)),tc2,0.0)
        @test predict(:Gaussian,[1 1;1 1];custom_prediction_function=1) == -1 #
end
end
@testset "$(format("Exponential Family: predict[gamma]"))" begin
let
tc1 = rand(100)
inv_tc1 = 1 ./ tc1
        @test check(:l2diff, predict(Gamma(),forward_map(Gamma(),inv_tc1)),tc1,0.0)
        @test check(:l2diff, predict(:Gamma,forward_map(:Gamma,inv_tc1)),tc1,0.0)
@test predict(:Gamma,[1,1];custom_prediction_function=1) == -1 #
tc2 = rand(100,100)
inv_tc2 = 1 ./ tc2
@test check(:l2diff, predict(Gamma(),forward_map(Gamma(),inv_tc2)),tc2,0.0)
@test check(:l2diff, predict(:Gamma,forward_map(:Gamma,inv_tc2)),tc2,0.0)
        @test predict(:Gamma,[1 1;1 1];custom_prediction_function=1) == -1 #
end
end
|
proofpile-julia0005-42539 | {
"provenance": "014.jsonl.gz:242540"
} | ##############################################################################
##
## Type with stored qr
##
##############################################################################
struct DenseQRAllocatedSolver{Tqr <: StridedMatrix, Tu <: AbstractVector} <: AbstractAllocatedSolver
qrm::Tqr
u::Tu
function DenseQRAllocatedSolver{Tqr, Tu}(qrm, u) where {Tqr <: StridedMatrix, Tu <: AbstractVector}
length(u) == size(qrm, 1) || throw(DimensionMismatch("u must have length size(J, 1)"))
new(qrm, u)
end
end
function DenseQRAllocatedSolver(qrm::Tqr, u::Tu) where {Tqr <: StridedMatrix, Tu <: AbstractVector}
DenseQRAllocatedSolver{Tqr, Tu}(qrm, u)
end
##############################################################################
##
## solve J'J \ J'y by QR
##
##############################################################################
function AbstractAllocatedSolver(nls::LeastSquaresProblem{Tx, Ty, Tf, TJ, Tg}, optimizer::Dogleg{QR}) where {Tx, Ty, Tf, TJ <: StridedVecOrMat, Tg}
return DenseQRAllocatedSolver(similar(nls.J), _zeros(nls.y))
end
function LinearAlgebra.ldiv!(x::AbstractVector, J::StridedMatrix, y::AbstractVector, A::DenseQRAllocatedSolver)
u, qrm = A.u, A.qrm
copyto!(qrm, J)
copyto!(u, y)
if VERSION >= v"1.7.0"
ldiv!(qr!(qrm, ColumnNorm()), u)
else
ldiv!(qr!(qrm, Val(true)), u)
end
for i in 1:length(x)
x[i] = u[i]
end
return x, 1
end
##############################################################################
##
## solve (J'J + diagm(damp)) \ J'y by QR
##
##############################################################################
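# The damped normal equations (J'J + D) x = J'y with D = diagm(damp) are exactly
# the normal equations of the augmented least-squares problem
#     min_x ‖ [J; sqrt.(D)] x - [y; 0] ‖₂²,
# which is what the stacked QR factorization below solves.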
function AbstractAllocatedSolver(nls::LeastSquaresProblem{Tx, Ty, Tf, TJ, Tg}, optimizer::LevenbergMarquardt{QR}) where {Tx, Ty, Tf, TJ <: StridedVecOrMat, Tg}
qrm = zeros(eltype(nls.J), length(nls.y) + length(nls.x), length(nls.x))
u = zeros(eltype(nls.y), length(nls.y) + length(nls.x))
return DenseQRAllocatedSolver(qrm, u)
end
function LinearAlgebra.ldiv!(x::AbstractVector, J::StridedMatrix, y::AbstractVector,
damp::AbstractVector, A::DenseQRAllocatedSolver, verbose::Bool = false)
u, qrm = A.u, A.qrm
    # check dimensions of the damped (augmented) system
length(u) == length(y) + length(x) || throw(DimensionMismatch("length(u) should equal length(x) + length(y)"))
# update qr as |J; diagm(damp)|
fill!(qrm, zero(eltype(qrm)))
for j in 1:size(J, 2)
for i in 1:size(J, 1)
qrm[i, j] = J[i, j]
end
end
leny = length(y)
for i in 1:length(damp)
qrm[leny + i, i] = sqrt(damp[i])
end
# update u as |J; 0|
fill!(u, zero(eltype(u)))
for i in 1:length(y)
u[i] = y[i]
end
if verbose
@show mean(u)
sleep(1)
end
# solve
if VERSION >= v"1.7.0"
ldiv!(qr!(qrm, ColumnNorm()), u)
else
ldiv!(qr!(qrm, Val(true)), u)
end
if verbose
@show mean(u)
sleep(1)
end
for i in 1:length(x)
x[i] = u[i]
end
return x, 1
end
|
proofpile-julia0005-42540 | {
"provenance": "014.jsonl.gz:242541"
} | @i function isolve(out!::T, sg::AbstractSpinglass{SquareLattice, T}, reg::ArrayReg{B,TT}, A_STACK, B_STACK) where {B,T,TT<:Tropical{T}}
@invcheckoff begin
@routine begin
Lx ← sg.lattice.Nx
Ly ← sg.lattice.Ny
Js ← sg.Js
hs ← sg.hs
end
@safe println("Layer 1/$Lx, stack size: $(A_STACK.top) & $(B_STACK.top)")
k ← 0
for j=1:Ly
apply_Gh!(reg, j, hs[j], A_STACK)
end
for j=1:Ly-1
k += 1
apply_Gvb!(reg, (@const (j, j+1)), Js[k], A_STACK)
end
for i=2:Lx
@safe println("Layer $i/$Lx, stack size: $(A_STACK.top) & $(B_STACK.top)")
@routine begin
for j=1:Ly
k += 1
apply_Ghb!(reg, j, Js[k], A_STACK)
end
end
            # back up the current state and loss
incstack!(B_STACK)
store_state!(B_STACK, reg.state)
# clean up `NiLang.GLOBAL_STACK`
~@routine
# but wait, `k` should not be uncomputed
k += Ly
# restore the state
swap_state!(B_STACK, reg.state)
for j=1:Ly
apply_Gh!(reg, j, hs[(i-1)*Ly+j], A_STACK)
end
for j=1:Ly-1
k += 1
apply_Gvb!(reg, (@const (j, j+1)), Js[k], A_STACK)
end
end
summed ← one(TT)
i_sum(summed, reg.state)
NiLang.SWAP(summed.n, out!)
k → length(Js)
summed → one(TT)
~@routine
end
end
@i function swap_state!(B_STACK, state)
@invcheckoff for j=1:length(state)
@inbounds NiLang.SWAP(state[j], B_STACK[j])
end
end
@i function store_state!(B_STACK, state)
@invcheckoff for j = 1:length(state)
@inbounds TropicalYao.Reversible.unsafe_store!(B_STACK[j], state[j])
end
end
@i function isolve_largemem(out!, sg::Spinglass{SquareLattice, T}, reg::ArrayReg{B,TT}, REG_STACK) where {B,T,TT<:Tropical{T}}
@invcheckoff begin
@routine begin
Lx ← sg.lattice.Nx
Ly ← sg.lattice.Ny
Js ← sg.Js
hs ← sg.hs
end
@safe println("Layer 1/$Lx")
k ← 0
for j=1:Ly
apply_Gh!(reg, j, hs[j], REG_STACK)
end
for j=1:Ly-1
k += 1
apply_Gvb!(reg, (@const (j, j+1)), Js[k], REG_STACK)
end
for i=2:Lx
@safe println("Layer $i/$Lx")
for j=1:Ly
k += 1
apply_Ghb!(reg, j, Js[k], REG_STACK)
end
for j=1:Ly
apply_Gh!(reg, j, hs[(i-1)*Ly+j], REG_STACK)
end
for j=1:Ly-1
k += 1
apply_Gvb!(reg, (@const (j, j+1)), Js[k], REG_STACK)
end
end
summed ← one(TT)
i_sum(summed, reg.state)
NiLang.SWAP(summed.n, out!)
k → length(Js)
summed → one(TT)
~@routine
end
end
cachesize_A(lt::SquareLattice) = lt.Ny
cachesize_B(lt::SquareLattice) = lt.Nx-1
cachesize_largemem(lt::SquareLattice) = (lt.Nx-1) * lt.Ny
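# Illustrative (hypothetical 4×3 lattice, i.e. Nx = 4, Ny = 3):
# cachesize_A == 3, cachesize_B == 3, cachesize_largemem == (4 - 1) * 3 == 9.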
|
proofpile-julia0005-42541 | {
"provenance": "014.jsonl.gz:242542"
} | using WordCloud
using HTTP
url = "http://en.wikipedia.org/wiki/Special:random"
try
resp = HTTP.request("GET", url, redirect=true)
println(resp.request)
content = resp.body |> String
wc = wordcloud(content |> html2text |> processtext) |> generate!
println("results are saved to fromweb.png")
paint(wc, "fromweb.png")
wc
catch e
println(e)
end
#eval# runexample(:fromweb)
#md#  |
proofpile-julia0005-42542 | {
"provenance": "014.jsonl.gz:242543"
} | using Random
using Dates
using JuMP, BilevelJuMP
using MathOptInterface
MOI = MathOptInterface
MOIU = MOI.Utilities
MAX_TIME = 600 # seconds
include("svr.jl")
include("rand.jl")
include("forecast.jl")
include("toll.jl")
mode = BilevelJuMP.SOS1Mode()
with_att = JuMP.optimizer_with_attributes
FA = BilevelJuMP.FortunyAmatMcCarlMode
using Xpress
using CPLEX
using Gurobi
using SCIP
using Cbc
using GLPK
using Ipopt
using KNITRO
using AmplNLWriter
using Mosek, MosekTools
using Juniper
using QuadraticToBinary
QB = QuadraticToBinary.Optimizer{Float64}
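# QuadraticToBinary linearizes products of variables via binary expansion;
# the `lb`/`ub` arguments used below bound the continuous variables entering
# those products (tighter bounds mean fewer binaries).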
cache(opt) = MOIU.CachingOptimizer(
MOIU.UniversalFallback(MOIU.Model{Float64}()), opt)
function cpx()
s=CPLEX.Optimizer()
MOI.set(s, MOI.RawParameter("CPXPARAM_TimeLimit"),MAX_TIME*1)
s
end
# const KN_OPT = KNITRO.Optimizer(maxtimecpu = MAX_TIME*1.0)
# function new_knitro()
# MOI.empty!(KN_OPT)
# return KN_OPT
# end
SOLVERS = [
#=
=#
#=
SOS1
=#
# (with_att(Gurobi.Optimizer, "TimeLimit" => MAX_TIME*1), BilevelJuMP.SOS1Mode(), "gurobi_sos1"),
# (with_att(CPLEX.Optimizer, "CPXPARAM_TimeLimit" => MAX_TIME*1), BilevelJuMP.SOS1Mode(), "cplex_sos1"),
# (with_att(Xpress.Optimizer, "MAXTIME" => -MAX_TIME*1, "logfile" => "output.log"), BilevelJuMP.SOS1Mode(), "xpress_sos1"),
# (with_att(Cbc.Optimizer, "seconds" => MAX_TIME*1.0), BilevelJuMP.SOS1Mode(), "cbc_sos1"),
# (with_att(SCIP.Optimizer, "limits/time" => MAX_TIME*1), BilevelJuMP.SOS1Mode(), "scip_sos1"),
#=
indicator
=#
# (with_att(CPLEX.Optimizer, "CPXPARAM_TimeLimit" => MAX_TIME), BilevelJuMP.IndicatorMode(), "cplex_indc"),
# # (with_att(Gurobi.Optimizer, "TimeLimit" => MAX_TIME), BilevelJuMP.IndicatorMode(), "gurobi_indc"), # not supporting indicator
# (with_att(Xpress.Optimizer, "MAXTIME" => -MAX_TIME, "logfile" => "output.log"), BilevelJuMP.IndicatorMode(), "xpress_indc"),
# (with_att(Cbc.Optimizer, "seconds" => MAX_TIME*1.0), BilevelJuMP.IndicatorMode(), "cbc_indc"),
# # (with_att(SCIP.Optimizer, "limits/time" => MAX_TIME), BilevelJuMP.IndicatorMode(), "scip_indc"), # weird offset error
#=
Fortuny-Amat 10
=#
# # (with_att(GLPK.Optimizer, "tm_lim" => MAX_TIME * 1_000), FA(primal_big_M = 10, dual_big_M = 10), "glpk_fa10"),
# # (with_att(Mosek.Optimizer, "MIO_MAX_TIME" => MAX_TIME * 1.0, "OPTIMIZER_MAX_TIME" => MAX_TIME * 1.0), FA(primal_big_M = 10, dual_big_M = 10), "mosek_fa10"), # no sos1
# # (with_att(Gurobi.Optimizer, "TimeLimit" => MAX_TIME*1), FA(primal_big_M = 10, dual_big_M = 10), "gurobi_fa10"),
# (with_att(CPLEX.Optimizer, "CPXPARAM_TimeLimit" => MAX_TIME*1), FA(primal_big_M = 10, dual_big_M = 10), "cplex_fa10"), #TODO
# (with_att(Xpress.Optimizer, "MAXTIME" => -MAX_TIME*1, "logfile" => "output.log"), FA(primal_big_M = 10, dual_big_M = 10), "xpress_fa10"),
# (with_att(Cbc.Optimizer, "seconds" => MAX_TIME*1.0), FA(primal_big_M = 10, dual_big_M = 10), "cbc_fa10"),
# (with_att(SCIP.Optimizer, "limits/time" => MAX_TIME*1), FA(primal_big_M = 10, dual_big_M = 10), "scip_fa10"),
#=
Fortuny-Amat 100
=#
# (with_att(GLPK.Optimizer, "tm_lim" => MAX_TIME * 1_000), FA(primal_big_M = 100, dual_big_M = 100), "glpk_fa100"),
# (with_att(Mosek.Optimizer, "MIO_MAX_TIME" => MAX_TIME * 1.0, "OPTIMIZER_MAX_TIME" => MAX_TIME * 1.0), FA(primal_big_M = 100, dual_big_M = 100), "mosek_fa100"), # no sos1
# (with_att(Gurobi.Optimizer, "TimeLimit" => MAX_TIME*1), FA(primal_big_M = 100, dual_big_M = 100), "gurobi_fa100"),
# (with_att(CPLEX.Optimizer, "CPXPARAM_TimeLimit" => MAX_TIME*1), FA(primal_big_M = 100, dual_big_M = 100), "cplex_fa100"),
# (with_att(Xpress.Optimizer, "MAXTIME" => -MAX_TIME*1, "logfile" => "output.log"), FA(primal_big_M = 100, dual_big_M = 100), "xpress_fa100"),
# (with_att(Cbc.Optimizer, "seconds" => MAX_TIME*1.0), FA(primal_big_M = 100, dual_big_M = 100), "cbc_fa100"),
# (with_att(SCIP.Optimizer, "limits/time" => MAX_TIME*1), FA(primal_big_M = 100, dual_big_M = 100), "scip_fa100"),
#=
Product NLP
=#
# (with_att(Ipopt.Optimizer, "max_cpu_time" => MAX_TIME*1.0), BilevelJuMP.ProductMode(1e-7), "ipopt_prod"),
# (new_knitro(), BilevelJuMP.ProductMode(1e-7), "knitro_prod"),
#=
Product BIN 10
=#
# (()->QB(GLPK.Optimizer(tm_lim=MAX_TIME*1_000),lb=-10,ub=10), BilevelJuMP.ProductMode(1e-7), "glpk_prod10"),
# (()->QB(Mosek.Optimizer(MIO_MAX_TIME=MAX_TIME*1.0,OPTIMIZER_MAX_TIME=MAX_TIME*1.0),lb=-10,ub=10), BilevelJuMP.ProductMode(1e-7), "mosek_prod10"),
# (()->QB(Gurobi.Optimizer(TimeLimit=MAX_TIME*1),lb=-10,ub=10), BilevelJuMP.ProductMode(1e-7), "gurobi_prod10"),
# (()->QB(cpx(),lb=-10,ub=10), BilevelJuMP.ProductMode(1e-7), "cplex_prod10"), #TODO
# (()->QB(Xpress.Optimizer(MAXTIME=-MAX_TIME*1),lb=-10,ub=10), BilevelJuMP.ProductMode(1e-7), "xpress_prod10"),
# (()->QB(cache(Cbc.Optimizer(seconds=MAX_TIME*1.0)),lb=-10,ub=10), BilevelJuMP.ProductMode(1e-7), "cbc_prod10"),
# (()->QB(SCIP.Optimizer(limits_time=MAX_TIME*1),lb=-10,ub=10), BilevelJuMP.ProductMode(1e-7), "scip_prod10"),
#=
Product BIN 100
=#
# (()->QB(GLPK.Optimizer(tm_lim=MAX_TIME*1_000),lb=-100,ub=100), BilevelJuMP.ProductMode(1e-7), "glpk_prod100"),
# (()->QB(Mosek.Optimizer(MIO_MAX_TIME=MAX_TIME*1.0,OPTIMIZER_MAX_TIME=MAX_TIME*1.0),lb=-100,ub=100), BilevelJuMP.ProductMode(1e-7), "mosek_prod100"),
# (()->QB(Gurobi.Optimizer(TimeLimit=MAX_TIME*1),lb=-100,ub=100), BilevelJuMP.ProductMode(1e-7), "gurobi_prod100"),
# (()->QB(cpx(),lb=-100,ub=100), BilevelJuMP.ProductMode(1e-7), "cplex_prod100"),
# (()->QB(Xpress.Optimizer(MAXTIME=-MAX_TIME*1),lb=-100,ub=100), BilevelJuMP.ProductMode(1e-7), "xpress_prod100"),
# (()->QB(cache(Cbc.Optimizer(seconds=MAX_TIME*1.0)),lb=-100,ub=100), BilevelJuMP.ProductMode(1e-7), "cbc_prod100"),
# (()->QB(SCIP.Optimizer(limits_time=MAX_TIME*1),lb=-100,ub=100), BilevelJuMP.ProductMode(1e-7), "scip_prod100"),
#=
PrimalDual BIN 10
=#
# (()->QB(GLPK.Optimizer(tm_lim=MAX_TIME*1_000),lb=-10,ub=10), BilevelJuMP.StrongDualityEqualityMode(), "glpk_sd10"),
# (()->QB(Mosek.Optimizer(MIO_MAX_TIME=MAX_TIME*1.0,OPTIMIZER_MAX_TIME=MAX_TIME*1.0),lb=-10,ub=10), BilevelJuMP.StrongDualityEqualityMode(), "mosek_sd10"),
# (()->QB(Gurobi.Optimizer(TimeLimit=MAX_TIME*1),lb=-10,ub=10), BilevelJuMP.StrongDualityEqualityMode(), "gurobi_sd10"),
# (()->QB(cpx(),lb=-10,ub=10), BilevelJuMP.StrongDualityEqualityMode(), "cplex_sd10"),
# (()->QB(Xpress.Optimizer(MAXTIME=-MAX_TIME*1),lb=-10,ub=10), BilevelJuMP.StrongDualityEqualityMode(), "xpress_sd10"),
# (()->QB(cache(Cbc.Optimizer(seconds=MAX_TIME*1.0)),lb=-10,ub=10), BilevelJuMP.StrongDualityEqualityMode(), "cbc_sd10"), #TODO
(()->QB(SCIP.Optimizer(limits_time=MAX_TIME*1),lb=-10,ub=10), BilevelJuMP.StrongDualityEqualityMode(), "scip_sd10"),
#=
PrimalDual BIN 100
=#
(()->QB(GLPK.Optimizer(tm_lim=MAX_TIME*1_000),lb=-100,ub=100), BilevelJuMP.StrongDualityEqualityMode(), "glpk_sd100"),
(()->QB(Mosek.Optimizer(MIO_MAX_TIME=MAX_TIME*1.0,OPTIMIZER_MAX_TIME=MAX_TIME*1.0),lb=-100,ub=100), BilevelJuMP.StrongDualityEqualityMode(), "mosek_sd100"),
(()->QB(Gurobi.Optimizer(TimeLimit=MAX_TIME*1),lb=-100,ub=100), BilevelJuMP.StrongDualityEqualityMode(), "gurobi_sd100"),
(()->QB(cpx(),lb=-100,ub=100), BilevelJuMP.StrongDualityEqualityMode(), "cplex_sd100"),
(()->QB(Xpress.Optimizer(MAXTIME=-MAX_TIME*1),lb=-100,ub=100), BilevelJuMP.StrongDualityEqualityMode(), "xpress_sd100"),
(()->QB(cache(Cbc.Optimizer(seconds=MAX_TIME*1.0)),lb=-100,ub=100), BilevelJuMP.StrongDualityEqualityMode(), "cbc_sd100"),
(()->QB(SCIP.Optimizer(limits_time=MAX_TIME*1),lb=-100,ub=100), BilevelJuMP.StrongDualityEqualityMode(), "scip_sd100"),
#=
Complemets
=#
# #(with_att(KNITRO.Optimizer, "maxtimecpu" => MAX_TIME*1.0), BilevelJuMP.ComplementMode(), "knitro_comp"),
#=
Product global
=#
# # (() -> AmplNLWriter.Optimizer("bonmin"), BilevelJuMP.ProductMode(1e-5)),
# # (() -> AmplNLWriter.Optimizer("couenne"), BilevelJuMP.ProductMode(1e-5)),
]
PROBLEMS = [
:SVR,
# :RAND,
:TOLL,
:FORECAST,
]
SEEDS = [
1234,
# 2345,
# 3456,
# 4567,
# 5678,
# 6789,
# 7890,
# 8901,
# 9012,
# 0123,
]
SVR = [
# (features, sample_size)
( 1, 10),
( 1, 10),
( 2, 10),
( 5, 10),
( 1, 100),
( 2, 100), # 600
( 5, 100),
( 10, 100),
( 20, 100),
( 50, 100),
# ( 1,1000), # hard for prod10
# ( 2,1000),
# ( 5,1000),
# # # ( 10,1000),
# # # ( 20,1000),
# # # ( 50,1000),
# # # (100,1000),
# # # (200,1000),
# # # (500,1000),
# # () for i in [1,2,5,10,20,50,100,200], j in [10, 100, 1000]
]
RAND = [
# (rows, cols)
( 5, 5),
( 10, 5),
( 5, 10),
( 10, 10),
( 50, 10),
( 10, 50),
( 50, 50), # 600
( 100, 50),
( 50, 100),
( 100, 100),
# # ( 500, 100),
# # ( 100, 500),
# # ( 500, 500),
# # (1000, 500),
# # ( 500,1000),
# # (1000,1000),
# # (5000,1000),
# # (1000,5000),
# # (5000,5000),
]
TOLL = [
# nodes
5,
10,
20,
# 50, # hard for prod10
# 100,
# 200, # also massive on memory
# 500, # 600 - broke gurobi
# # 1000,
# # 2000,
# # 5000,
]
FORECAST = [
# (products, sample_size)
( 1, 10),
( 2, 10),
( 5, 10),
( 1, 100),
# ( 2, 100), # hard for prod10
# ( 5, 100), # 600
# ( 10, 100),
# ( 20, 100),
# ( 50, 100),
# ( 1,1000),
# ( 2,1000),
# ( 5,1000),
# # ( 10,1000),
# # ( 20,1000),
# # ( 50,1000),
# # (100,1000),
# # (200,1000),
# # (500,1000),
# # () for i in [1,2,5,10,20,50,100,200], j in [10, 100, 1000]
]
function separator()
println()
println()
println("============================================================")
println("============================================================")
println()
println()
end
function new_file()
cd(dirname(@__FILE__))
FILE = open("bench$(replace("$(now())",":"=>"_")).log", "w")
    println(FILE, "opt_mode, prob, inst, seed, primal_status, termination_status, solve_time, build_time, lower_obj, upper_obj")
flush(FILE)
return FILE
end
function newline(FILE, data, opt, prb, inst, seed)
println(FILE, "$opt, $prb, $inst, $seed, $(data[1]),$(data[2]),$(data[3]),$(data[4]),$(data[5]),$(data[6]),$(data[7])")
flush(FILE)
end
FILE = new_file()
for seed in SEEDS
for (optimizer, mode, name) in SOLVERS
if :SVR in PROBLEMS
for (features, samples) in SVR
separator()
@show features, samples, seed, name
separator()
ret = bench_svr(features, samples, optimizer, mode, seed)
newline(FILE, ret, name, :SVR, (features, samples), seed)
end
end
if :RAND in PROBLEMS
for (rows, cols) in RAND
separator()
@show rows, cols, seed, name
separator()
ret = bench_rand(rows, cols, 0.5, optimizer, mode, seed)
newline(FILE, ret, name, :RAND, (rows, cols), seed)
end
end
if :TOLL in PROBLEMS
for nodes in TOLL
separator()
@show nodes, seed, name
separator()
ret = bench_toll(nodes, optimizer, mode, seed)
newline(FILE, ret, name, :TOLL, (nodes,), seed)
end
end
if :FORECAST in PROBLEMS
for (products, samples) in FORECAST
separator()
@show products, samples, seed, name
separator()
ret = bench_forecast(products, samples, optimizer, mode, seed)
newline(FILE, ret, name, :FORE, (products, samples), seed)
end
end
end
end
close(FILE)
exit(0) |
proofpile-julia0005-42543 | {
"provenance": "014.jsonl.gz:242544"
} | using MultivariateOrthogonalPolynomials, ArrayLayouts, BandedMatrices, BlockArrays, Test
import MultivariateOrthogonalPolynomials: ModalInterlace, ModalInterlaceLayout, ModalTrav
@testset "modalTrav" begin
a = ModalTrav(randn(2,5))
b = PseudoBlockArray(a)
v = Vector(a)
@test zero(a) isa ModalTrav
@test zero(a) == zero(b)
@test exp.(a) isa ModalTrav
@test 2a isa ModalTrav
@test a+a isa ModalTrav
@test a+b isa PseudoBlockArray
@test a+v isa BlockArray
@test a .+ exp.(a .+ 1) isa ModalTrav
@test exp.(a) == exp.(b)
@test a + a == 2a == a+b
@test a .+ exp.(a .+ 1) == b .+ exp.(b .+ 1)
for k = 1:6
a[k] = k
end
@test a == 1:6
m = ModalTrav(reshape(1:10, 2, 5))
@test m[Block(3)] == [2,7,9]
@test m == [1,3,5,2,7,9]
@test copy(m) isa ModalTrav{Int,Matrix{Int}}
@test copy(m) == m
end
@testset "ModalInterlace" begin
ops = [brand(2,3,1,2), brand(1,2,1,1), brand(1,2,1,2)]
A = ModalInterlace(ops, (3,5), (2,4))
@test MemoryLayout(A) isa ModalInterlaceLayout
@test A[[1,4],[1,4,11]] == ops[1]
@test A[[2],[2,7]] == ops[2]
@test A[[5],[5,12]] == A[[6],[6,13]] == ops[3]
b = ModalTrav(1:15)
@test A*b ≈ Matrix(A) * Vector(b)
ops = [brand(3,3,1,2), brand(2,2,1,1), brand(2,2,1,2), brand(1,1,1,1), brand(1,1,1,2)]
B = ModalInterlace(ops, (5,5), (2,4))
@test B\b ≈ Matrix(B) \ Vector(b)
end |
proofpile-julia0005-42544 | {
"provenance": "014.jsonl.gz:242545"
} | struct RejectionExact <: AbstractRejectionExact end
function solve(problem::PDMPProblem, Flow::Function; verbose::Bool = false, save_rejected = false, ind_save_d = -1:1, ind_save_c = -1:1, n_jumps = Inf64, save_positions = (false, true), save_rate = false, finalizer = finalize_dummy)
verbose && println("#"^30)
verbose && printstyled(color=:red,"--> Start Rejection method\n")
# initialise the problem. If I call twice this function, it should give the same result...
init!(problem)
# we declare the characteristics for convenience
caract = problem.caract
ratecache = caract.ratecache
simjptimes = problem.simjptimes
ti, tf = problem.tspan
# it is faster to pre-allocate arrays and fill it at run time
    n_jumps += 1 # extra slot to hold the initial state
n_reject = 0 # to hold the number of rejects
nsteps = 1
npoints = 2 # number of points for ODE integration
xc0 = caract.xc
xd0 = caract.xd
# Set up initial variables
t = ti
X0 = copy(xc0)
Xd = copy(xd0)
res_ode = zeros(2, length(X0))
X0, _, Xd, _, xc_hist, xd_hist, res_ode, ind_save_d, ind_save_c = allocate_arrays(ti, xc0, xd0, n_jumps; rejection = true)
tp = [ti, tf] # vector to hold the time interval over which to integrate the flow
#variables for rejection algorithm
reject = true
lambda_star = 0.0 # this is the bound for the rejection method
ppf = caract.R(ratecache.rate, X0, Xd, caract.parms, t, true)
δt = simjptimes.tstop_extended
while (t < tf) && (nsteps < n_jumps)
verbose && println("--> step : ",nsteps," / ", n_jumps)
reject = true
while reject && (nsteps < n_jumps)
            tp .= [t, min(tf, t + δt / ppf[2]) ] # should this use lambda_star instead?
Flow(res_ode, X0, Xd, tp)
@inbounds for ii in eachindex(X0)
X0[ii] = res_ode[end, ii]
end
verbose && println("----> δt = ", δt, ", t∈", tp, ", dt = ", tp[2]-tp[1], ", xc = ", X0)
t = tp[end]
ppf = caract.R(ratecache.rate, X0, Xd, caract.parms, t, true)
@assert ppf[1] <= ppf[2] "(Rejection algorithm) Your bound on the total rate is wrong, $ppf"
if t == tf
reject = false
else
reject = rand() < 1 - ppf[1] / ppf[2]
end
δt = -log(rand())
if reject
n_reject += 1
end
end
# there is a jump!
ppf = caract.R(ratecache.rate, X0, Xd, caract.parms, t, false)
if (t < tf)
verbose && println("----> Jump!, ratio = ", ppf[1] / ppf[2], ", xd = ", Xd)
# make a jump
ev = pfsample(ratecache.rate)
# we perform the jump
affect!(caract.pdmpjump, ev, X0, Xd, caract.parms, t)
end
nsteps += 1
pushTime!(problem, t)
push!(xc_hist, X0[ind_save_c])
push!(xd_hist, Xd[ind_save_d])
save_rate && push!(problem.rate_hist, sum(ratecache.rate))
finalizer(ratecache.rate, caract.xc, caract.xd, caract.parms, t)
end
if verbose println("--> Done") end
if verbose println("--> xd = ",xd_hist[:,1:nsteps]) end
return PDMPResult(problem.time, xc_hist, xd_hist, problem.rate_hist, save_positions, nsteps, n_reject)
end
function solve(problem::PDMPProblem, algo::Rejection{Tode}; reltol = 1e-7, abstol = 1e-9, kwargs...) where {Tode <: Symbol}
ode = algo.ode
@assert ode in [:cvode, :lsoda, :adams, :bdf]
caract = problem.caract
# define the ODE flow
if ode == :cvode || ode == :bdf
Flow0 = (X0_,Xd,tp_) -> Sundials.cvode( (tt,x,xdot) -> caract.F(xdot,x,Xd,caract.parms,tt), X0_, tp_, abstol = abstol, reltol = reltol, integrator = :BDF)
elseif ode == :adams
Flow0 = (X0_,Xd,tp_) -> Sundials.cvode( (tt,x,xdot) -> caract.F(xdot,x,Xd,caract.parms,tt), X0_, tp_, abstol = abstol, reltol = reltol, integrator = :Adams)
elseif ode == :lsoda
Flow0 = (X0_,Xd,tp_) -> LSODA.lsoda((tt,x,xdot,data) -> caract.F(xdot,x,Xd,caract.parms,tt), X0_, tp_, abstol = abstol, reltol = reltol)
end
Flow = (out,X0_,Xd,tp_) -> (out .= Flow0(X0_,Xd,tp_))
return solve(problem, Flow; kwargs...)
end
function solve(problem::PDMPProblem, algo::Talgo; kwargs...) where {Talgo <: AbstractRejectionExact}
Flow = (res_ode, X0, Xd, tp) -> problem.caract.F(res_ode, X0, Xd, problem.caract.parms, tp)
solve(problem, Flow; kwargs...)
end
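# Minimal usage sketch (hypothetical `prob::PDMPProblem` built elsewhere):
# res = solve(prob, RejectionExact(); n_jumps = 100, verbose = false)
# res = solve(prob, Rejection(:lsoda)) # ODE-based flow; assumes this constructor form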
|
proofpile-julia0005-42545 | {
"provenance": "014.jsonl.gz:242546"
} | using BinaryProvider
include("compile.jl")
const verbose = ("--verbose" in ARGS)
const prefix = Prefix(get([a for a in ARGS if a != "--verbose"], 1, joinpath(@__DIR__, "usr")))
products = [
LibraryProduct(prefix, String["libextraf"], :libextraf),
]
src_url = "https://mkatase.github.io/github-hosted/data/libmakef.tar.gz"
src_hash = "d33fef76bf8026112b7d441a4038225d679f305eb23fe2dfb5b249ce270eafbe"
src_path = joinpath(prefix, "downloads", "libmakef.tar.gz")
libname = "libextraf"
if !isfile(src_path) || !verify(src_path, src_hash; verbose=verbose)
println("Go to compile")
compile(libname, src_url, src_hash, prefix=prefix, verbose=verbose)
end
write_deps_file(joinpath(@__DIR__, "deps.jl"), products, verbose=verbose)
println("end of build.jl script")
|
proofpile-julia0005-42546 | {
"provenance": "014.jsonl.gz:242547"
} | using YAML, DataFrames, CSV, Plots
using Statistics
using Streamfall
HERE = @__DIR__
DATA_PATH = joinpath(HERE, "../../test/data/hymod/")
# Load and generate stream network
network = YAML.load_file(joinpath(DATA_PATH, "hymod_network.yml"))
sn = create_network("HyMod Network", network)
# Load climate data
date_format = "YYYY-mm-dd"
obs_data = CSV.File(joinpath(DATA_PATH, "leaf_river_data.csv"),
comment="#",
dateformat=date_format) |> DataFrame
hist_streamflow = obs_data[:, "leaf_river_outflow"]
climate_data = obs_data[:, ["Date", "leaf_river_P", "leaf_river_ET"]]
climate = Climate(climate_data, "_P", "_ET")
# This will set node parameters to the optimal values found
metric = (obs, sim) -> 1.0 - Streamfall.NNSE(obs, sim)
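# NNSE lies in (0, 1] with 1 a perfect fit; the calibration objective is
# minimized, so we minimize 1 - NNSE instead.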
calibrate!(sn, climate, hist_streamflow; metric=metric, MaxTime=90.0)
# Save calibrated network spec to file
Streamfall.save_network_spec(sn, "hymod_example_calibrated.yml")
# Run the model as a single system
run_basin!(sn, climate)
# Get node
nid, node = sn["leaf_river"]
@info "Mean flow (ft^3/s)" mean(node.outflow)
# Get performance and plot
obs = hist_streamflow # already extracted as a vector above
@info "RMSE" Streamfall.RMSE(obs, node.outflow)
@info "NSE" Streamfall.NSE(obs, node.outflow)
plot(obs)
plot!(node.outflow)
|
proofpile-julia0005-42547 | {
"provenance": "014.jsonl.gz:242548"
} | module JLBOX_MODULETest
include("$(pwd())/test/helper.jl")
reload("$(pwd())/src/JLBOX_MODULE.jl")
using JLBOX_MODULE
facts("JLBOX_MODULE") do
@fact isempty(lintfile("$(pwd())/src/JLBOX_MODULE.jl", returnMsgs=true)) => true
@fact isempty(lintfile("$(pwd())/test/JLBOX_MODULE_test.jl", returnMsgs=true)) => true
end
end # module JLBOX_MODULETest
|
proofpile-julia0005-42548 | {
"provenance": "014.jsonl.gz:242549"
} | using Distributions: Distribution
import Base: length, eltype # extended below; assumed to live at module level in the original package
struct MaximalCoupling{U1<:Distribution, U2<:Distribution}
p::U1
q::U2
function MaximalCoupling{U1, U2}(p::U1, q::U2) where {U1, U2}
length(p) == length(q) ||
throw(DimensionMismatch("Coupled distributions of different dimension."))
eltype(p) == eltype(q) ||
throw(ArgumentError("Coupled distributions of different types."))
new(p, q)
end
end
MaximalCoupling(p::U1, q::U2) where {U1, U2} = MaximalCoupling{U1, U2}(p, q)
length(coup::MaximalCoupling) = length(coup.p)
eltype(coup::MaximalCoupling) = Union{eltype(coup.p), eltype(coup.q)}
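# Illustrative sampling sketch for a maximal coupling (not part of this file's API):
# draw X ~ p and accept Y = X with probability min(1, pdf(q, X) / pdf(p, X));
# otherwise rejection-sample Y from the residual of q, so that
# P(X == Y) == ∫ min(pdf(p, x), pdf(q, x)) dx, which is the maximal meeting probability.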
|