id | metadata | text
---|---|---
proofpile-julia0005-42649 | {
"provenance": "014.jsonl.gz:242650"
} | commands = readlines("day14/input.txt")
function mask_bits(mask, value)
    value_bits = bitstring(value)
    out_bitstring = []
    for i in 0:35
        if mask[end-i] == 'X'
            pushfirst!(out_bitstring, value_bits[end-i])
        else
            pushfirst!(out_bitstring, mask[end-i])
        end
    end
    return parse(Int, join(out_bitstring); base=2)
end
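# Quick sanity check (a sketch using the worked example from the 2020 Day 14
# puzzle statement): forcing bit 6 to 1 and bit 1 to 0 turns 11 into 73.
example_mask = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X"
@assert mask_bits(example_mask, 11) == 73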
current_mask = ""
memory = Dict{Int, Int}()
for instruction in commands
    if contains(instruction, "[")
        address, value = match(r"mem\[(\d+)\] = (\d+)", instruction).captures
        global memory[parse(Int, address)] = mask_bits(current_mask, parse(Int, value))
    else
        global current_mask = match(r"mask = (\w+)", instruction).captures[1]
    end
end
println(sum(v for v in values(memory)))
# That was fun! Ending with a sub 1k place too! |
proofpile-julia0005-42650 | {
"provenance": "014.jsonl.gz:242651"
} | function inverse(side::HorizonSide)
    return HorizonSide(mod(Int(side) + 2, 4))
end
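# Tiny sanity sketch (assumes the usual HorizonSideRobots.jl encoding
# Nord = 0, West = 1, Sud = 2, Ost = 3), so adding 2 mod 4 flips a direction:
#   inverse(Nord) == Sud
#   inverse(Ost)  == West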
function f()
    steps = []
    while !isborder(robot, Nord)
        move!(robot, Nord)
        push!(steps, inverse(Nord))
    end
    while !isborder(robot, Ost)
        move!(robot, Ost)
        push!(steps, inverse(Ost))
    end
    # The actual solution starts here.
    side = Ost
    isOk = false
    for i = 1:10
        move!(robot, Sud)
        side = inverse(side)
        for j = 1:11
            if !isborder(robot, side)
                move!(robot, side)
            else
                isOk = true
                break
            end
        end
        if isOk
            break
        end
    end
    while isborder(robot, side)
        putmarker!(robot)
        move!(robot, Sud)
    end
    putmarker!(robot)
    move!(robot, side)
    while isborder(robot, Nord)
        putmarker!(robot)
        move!(robot, side)
    end
    putmarker!(robot)
    move!(robot, Nord)
    side = inverse(side)
    while isborder(robot, side)
        putmarker!(robot)
        move!(robot, Nord)
    end
    putmarker!(robot)
    move!(robot, side)
    while isborder(robot, Sud)
        putmarker!(robot)
        move!(robot, side)
    end
    putmarker!(robot)
    while !isborder(robot, Nord)
        move!(robot, Nord)
    end
    while !isborder(robot, Ost)
        move!(robot, Ost)
    end
    while length(steps) > 0
        move!(robot, pop!(steps))
    end
end
f() |
proofpile-julia0005-42651 | {
"provenance": "014.jsonl.gz:242652"
} | # Julia wrapper for header: /home/gkraemer/.julia/dev/LevelDB/deps/src/leveldb-1.20/include/leveldb/c.h
# Automatically generated using Clang.jl wrap_c
function leveldb_open(options, name, errptr)
ccall((:leveldb_open, libleveldb), Ptr{leveldb_t}, (Ptr{leveldb_options_t}, Cstring, Ptr{Cstring}), options, name, errptr)
end
function leveldb_close(db)
ccall((:leveldb_close, libleveldb), Cvoid, (Ptr{leveldb_t},), db)
end
function leveldb_put(db, options, key, keylen, val, vallen, errptr)
ccall((:leveldb_put, libleveldb), Cvoid, (Ptr{leveldb_t}, Ptr{leveldb_writeoptions_t}, Cstring, Csize_t, Cstring, Csize_t, Ptr{Cstring}), db, options, key, keylen, val, vallen, errptr)
end
function leveldb_delete(db, options, key, keylen, errptr)
ccall((:leveldb_delete, libleveldb), Cvoid, (Ptr{leveldb_t}, Ptr{leveldb_writeoptions_t}, Cstring, Csize_t, Ptr{Cstring}), db, options, key, keylen, errptr)
end
function leveldb_write(db, options, batch, errptr)
ccall((:leveldb_write, libleveldb), Cvoid, (Ptr{leveldb_t}, Ptr{leveldb_writeoptions_t}, Ptr{leveldb_writebatch_t}, Ptr{Cstring}), db, options, batch, errptr)
end
function leveldb_get(db, options, key, keylen, vallen, errptr)
ccall((:leveldb_get, libleveldb), Cstring, (Ptr{leveldb_t}, Ptr{leveldb_readoptions_t}, Cstring, Csize_t, Ptr{Csize_t}, Ptr{Cstring}), db, options, key, keylen, vallen, errptr)
end
function leveldb_create_iterator(db, options)
ccall((:leveldb_create_iterator, libleveldb), Ptr{leveldb_iterator_t}, (Ptr{leveldb_t}, Ptr{leveldb_readoptions_t}), db, options)
end
function leveldb_create_snapshot(db)
ccall((:leveldb_create_snapshot, libleveldb), Ptr{leveldb_snapshot_t}, (Ptr{leveldb_t},), db)
end
function leveldb_release_snapshot(db, snapshot)
ccall((:leveldb_release_snapshot, libleveldb), Cvoid, (Ptr{leveldb_t}, Ptr{leveldb_snapshot_t}), db, snapshot)
end
function leveldb_property_value(db, propname)
ccall((:leveldb_property_value, libleveldb), Cstring, (Ptr{leveldb_t}, Cstring), db, propname)
end
function leveldb_approximate_sizes(db, num_ranges, range_start_key, range_start_key_len, range_limit_key, range_limit_key_len, sizes)
ccall((:leveldb_approximate_sizes, libleveldb), Cvoid, (Ptr{leveldb_t}, Cint, Ptr{Cstring}, Ptr{Csize_t}, Ptr{Cstring}, Ptr{Csize_t}, Ptr{UInt64}), db, num_ranges, range_start_key, range_start_key_len, range_limit_key, range_limit_key_len, sizes)
end
function leveldb_compact_range(db, start_key, start_key_len, limit_key, limit_key_len)
ccall((:leveldb_compact_range, libleveldb), Cvoid, (Ptr{leveldb_t}, Cstring, Csize_t, Cstring, Csize_t), db, start_key, start_key_len, limit_key, limit_key_len)
end
function leveldb_destroy_db(options, name, errptr)
ccall((:leveldb_destroy_db, libleveldb), Cvoid, (Ptr{leveldb_options_t}, Cstring, Ptr{Cstring}), options, name, errptr)
end
function leveldb_repair_db(options, name, errptr)
ccall((:leveldb_repair_db, libleveldb), Cvoid, (Ptr{leveldb_options_t}, Cstring, Ptr{Cstring}), options, name, errptr)
end
function leveldb_iter_destroy(arg1)
ccall((:leveldb_iter_destroy, libleveldb), Cvoid, (Ptr{leveldb_iterator_t},), arg1)
end
function leveldb_iter_valid(arg1)
ccall((:leveldb_iter_valid, libleveldb), Cuchar, (Ptr{leveldb_iterator_t},), arg1)
end
function leveldb_iter_seek_to_first(arg1)
ccall((:leveldb_iter_seek_to_first, libleveldb), Cvoid, (Ptr{leveldb_iterator_t},), arg1)
end
function leveldb_iter_seek_to_last(arg1)
ccall((:leveldb_iter_seek_to_last, libleveldb), Cvoid, (Ptr{leveldb_iterator_t},), arg1)
end
function leveldb_iter_seek(arg1, k, klen)
ccall((:leveldb_iter_seek, libleveldb), Cvoid, (Ptr{leveldb_iterator_t}, Cstring, Csize_t), arg1, k, klen)
end
function leveldb_iter_next(arg1)
ccall((:leveldb_iter_next, libleveldb), Cvoid, (Ptr{leveldb_iterator_t},), arg1)
end
function leveldb_iter_prev(arg1)
ccall((:leveldb_iter_prev, libleveldb), Cvoid, (Ptr{leveldb_iterator_t},), arg1)
end
function leveldb_iter_key(arg1, klen)
ccall((:leveldb_iter_key, libleveldb), Cstring, (Ptr{leveldb_iterator_t}, Ptr{Csize_t}), arg1, klen)
end
function leveldb_iter_value(arg1, vlen)
ccall((:leveldb_iter_value, libleveldb), Cstring, (Ptr{leveldb_iterator_t}, Ptr{Csize_t}), arg1, vlen)
end
function leveldb_iter_get_error(arg1, errptr)
ccall((:leveldb_iter_get_error, libleveldb), Cvoid, (Ptr{leveldb_iterator_t}, Ptr{Cstring}), arg1, errptr)
end
function leveldb_writebatch_create()
ccall((:leveldb_writebatch_create, libleveldb), Ptr{leveldb_writebatch_t}, ())
end
function leveldb_writebatch_destroy(arg1)
ccall((:leveldb_writebatch_destroy, libleveldb), Cvoid, (Ptr{leveldb_writebatch_t},), arg1)
end
function leveldb_writebatch_clear(arg1)
ccall((:leveldb_writebatch_clear, libleveldb), Cvoid, (Ptr{leveldb_writebatch_t},), arg1)
end
function leveldb_writebatch_put(arg1, key, klen, val, vlen)
ccall((:leveldb_writebatch_put, libleveldb), Cvoid, (Ptr{leveldb_writebatch_t}, Cstring, Csize_t, Cstring, Csize_t), arg1, key, klen, val, vlen)
end
function leveldb_writebatch_delete(arg1, key, klen)
ccall((:leveldb_writebatch_delete, libleveldb), Cvoid, (Ptr{leveldb_writebatch_t}, Cstring, Csize_t), arg1, key, klen)
end
function leveldb_writebatch_iterate(arg1, state, put, deleted)
ccall((:leveldb_writebatch_iterate, libleveldb), Cvoid, (Ptr{leveldb_writebatch_t}, Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}), arg1, state, put, deleted)
end
function leveldb_options_create()
ccall((:leveldb_options_create, libleveldb), Ptr{leveldb_options_t}, ())
end
function leveldb_options_destroy(arg1)
ccall((:leveldb_options_destroy, libleveldb), Cvoid, (Ptr{leveldb_options_t},), arg1)
end
function leveldb_options_set_comparator(arg1, arg2)
ccall((:leveldb_options_set_comparator, libleveldb), Cvoid, (Ptr{leveldb_options_t}, Ptr{leveldb_comparator_t}), arg1, arg2)
end
function leveldb_options_set_filter_policy(arg1, arg2)
ccall((:leveldb_options_set_filter_policy, libleveldb), Cvoid, (Ptr{leveldb_options_t}, Ptr{leveldb_filterpolicy_t}), arg1, arg2)
end
function leveldb_options_set_create_if_missing(arg1, arg2)
ccall((:leveldb_options_set_create_if_missing, libleveldb), Cvoid, (Ptr{leveldb_options_t}, Cuchar), arg1, arg2)
end
function leveldb_options_set_error_if_exists(arg1, arg2)
ccall((:leveldb_options_set_error_if_exists, libleveldb), Cvoid, (Ptr{leveldb_options_t}, Cuchar), arg1, arg2)
end
function leveldb_options_set_paranoid_checks(arg1, arg2)
ccall((:leveldb_options_set_paranoid_checks, libleveldb), Cvoid, (Ptr{leveldb_options_t}, Cuchar), arg1, arg2)
end
function leveldb_options_set_env(arg1, arg2)
ccall((:leveldb_options_set_env, libleveldb), Cvoid, (Ptr{leveldb_options_t}, Ptr{leveldb_env_t}), arg1, arg2)
end
function leveldb_options_set_info_log(arg1, arg2)
ccall((:leveldb_options_set_info_log, libleveldb), Cvoid, (Ptr{leveldb_options_t}, Ptr{leveldb_logger_t}), arg1, arg2)
end
function leveldb_options_set_write_buffer_size(arg1, arg2)
ccall((:leveldb_options_set_write_buffer_size, libleveldb), Cvoid, (Ptr{leveldb_options_t}, Csize_t), arg1, arg2)
end
function leveldb_options_set_max_open_files(arg1, arg2)
ccall((:leveldb_options_set_max_open_files, libleveldb), Cvoid, (Ptr{leveldb_options_t}, Cint), arg1, arg2)
end
function leveldb_options_set_cache(arg1, arg2)
ccall((:leveldb_options_set_cache, libleveldb), Cvoid, (Ptr{leveldb_options_t}, Ptr{leveldb_cache_t}), arg1, arg2)
end
function leveldb_options_set_block_size(arg1, arg2)
ccall((:leveldb_options_set_block_size, libleveldb), Cvoid, (Ptr{leveldb_options_t}, Csize_t), arg1, arg2)
end
function leveldb_options_set_block_restart_interval(arg1, arg2)
ccall((:leveldb_options_set_block_restart_interval, libleveldb), Cvoid, (Ptr{leveldb_options_t}, Cint), arg1, arg2)
end
function leveldb_options_set_compression(arg1, arg2)
ccall((:leveldb_options_set_compression, libleveldb), Cvoid, (Ptr{leveldb_options_t}, Cint), arg1, arg2)
end
function leveldb_comparator_create(state, destructor, compare, name)
ccall((:leveldb_comparator_create, libleveldb), Ptr{leveldb_comparator_t}, (Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}), state, destructor, compare, name)
end
function leveldb_comparator_destroy(arg1)
ccall((:leveldb_comparator_destroy, libleveldb), Cvoid, (Ptr{leveldb_comparator_t},), arg1)
end
function leveldb_filterpolicy_create(state, destructor, create_filter, key_may_match, name)
ccall((:leveldb_filterpolicy_create, libleveldb), Ptr{leveldb_filterpolicy_t}, (Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}), state, destructor, create_filter, key_may_match, name)
end
function leveldb_filterpolicy_destroy(arg1)
ccall((:leveldb_filterpolicy_destroy, libleveldb), Cvoid, (Ptr{leveldb_filterpolicy_t},), arg1)
end
function leveldb_filterpolicy_create_bloom(bits_per_key)
ccall((:leveldb_filterpolicy_create_bloom, libleveldb), Ptr{leveldb_filterpolicy_t}, (Cint,), bits_per_key)
end
function leveldb_readoptions_create()
ccall((:leveldb_readoptions_create, libleveldb), Ptr{leveldb_readoptions_t}, ())
end
function leveldb_readoptions_destroy(arg1)
ccall((:leveldb_readoptions_destroy, libleveldb), Cvoid, (Ptr{leveldb_readoptions_t},), arg1)
end
function leveldb_readoptions_set_verify_checksums(arg1, arg2)
ccall((:leveldb_readoptions_set_verify_checksums, libleveldb), Cvoid, (Ptr{leveldb_readoptions_t}, Cuchar), arg1, arg2)
end
function leveldb_readoptions_set_fill_cache(arg1, arg2)
ccall((:leveldb_readoptions_set_fill_cache, libleveldb), Cvoid, (Ptr{leveldb_readoptions_t}, Cuchar), arg1, arg2)
end
function leveldb_readoptions_set_snapshot(arg1, arg2)
ccall((:leveldb_readoptions_set_snapshot, libleveldb), Cvoid, (Ptr{leveldb_readoptions_t}, Ptr{leveldb_snapshot_t}), arg1, arg2)
end
function leveldb_writeoptions_create()
ccall((:leveldb_writeoptions_create, libleveldb), Ptr{leveldb_writeoptions_t}, ())
end
function leveldb_writeoptions_destroy(arg1)
ccall((:leveldb_writeoptions_destroy, libleveldb), Cvoid, (Ptr{leveldb_writeoptions_t},), arg1)
end
function leveldb_writeoptions_set_sync(arg1, arg2)
ccall((:leveldb_writeoptions_set_sync, libleveldb), Cvoid, (Ptr{leveldb_writeoptions_t}, Cuchar), arg1, arg2)
end
function leveldb_cache_create_lru(capacity)
ccall((:leveldb_cache_create_lru, libleveldb), Ptr{leveldb_cache_t}, (Csize_t,), capacity)
end
function leveldb_cache_destroy(cache)
ccall((:leveldb_cache_destroy, libleveldb), Cvoid, (Ptr{leveldb_cache_t},), cache)
end
function leveldb_create_default_env()
ccall((:leveldb_create_default_env, libleveldb), Ptr{leveldb_env_t}, ())
end
function leveldb_env_destroy(arg1)
ccall((:leveldb_env_destroy, libleveldb), Cvoid, (Ptr{leveldb_env_t},), arg1)
end
function leveldb_free(ptr)
ccall((:leveldb_free, libleveldb), Cvoid, (Ptr{Cvoid},), ptr)
end
function leveldb_major_version()
ccall((:leveldb_major_version, libleveldb), Cint, ())
end
function leveldb_minor_version()
ccall((:leveldb_minor_version, libleveldb), Cint, ())
end
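# A minimal usage sketch of the wrappers above — not part of the generated file.
# It assumes `libleveldb` and the `leveldb_*_t` types are already provided by the
# surrounding LevelDB.jl package; the database path is made up for illustration.
#
#   opts = leveldb_options_create()
#   leveldb_options_set_create_if_missing(opts, 1)
#   err  = Ref{Cstring}(C_NULL)
#   db   = leveldb_open(opts, "/tmp/example_db", err)
#   wopt = leveldb_writeoptions_create()
#   leveldb_put(db, wopt, "key", 3, "value", 5, err)
#   ropt = leveldb_readoptions_create()
#   vlen = Ref{Csize_t}(0)
#   val  = leveldb_get(db, ropt, "key", 3, vlen, err)   # Cstring of length vlen[]
#   leveldb_close(db)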
|
proofpile-julia0005-42652 | {
"provenance": "014.jsonl.gz:242653"
} | using QuantumOptics, CollectiveSpins
const cs = CollectiveSpins
# System parameters
const a = 0.54
const γ = 1.
const e_dipole = [0,0,1.]
const T = [0:0.05:5.;]
const system = SpinCollection(cs.geometry.cube(a), e_dipole; gamma=γ)
const N = length(system.spins)
# Initial state (Bloch state)
#const phi = Float64[(i-1)*pi for i=1:N]
#const phi = [0. for i=1:N]
const phi = [0., 0., 0., 0., 0.5*pi, 0.5*pi, 0.5*pi, 0.5*pi]
const theta = ones(Float64, N)*pi/2.
# Output
const targetdir = "."
const name = "rotsym2.dat"
# Time evolution
# Independent
state0 = cs.independent.blochstate(phi, theta)
tout, state_ind_t = cs.independent.timeevolution(T, system, state0)
# Meanfield
state0 = cs.meanfield.blochstate(phi, theta)
tout, state_mf_t = cs.meanfield.timeevolution(T, system, state0)
# Symmetric meanfield
state0 = cs.meanfield.blochstate(0., pi/2., 1)
Ωeff, Γeff = CollectiveSpins.effective_interaction_rotated.cube_orthogonal(a, pi/2.)
tout, state_mfsym_t = cs.meanfield.timeevolution_symmetric(T, state0, Ωeff, Γeff)
# #println("N: ", state_mf_t[end].N)
# println(cs.meanfield.sy(state_mf_t[1]))
# println(cs.meanfield.sy(state_mf_t[2]))
# println(cs.meanfield.sy(state_mf_t[3]))
# println(cs.meanfield.sy(state_mf_t[4]))
# println(cs.meanfield.sy(state_mf_t[end]))
# #println("N: ", state_mfsym_t[end].N)
# println(cs.meanfield.sy(state_mfsym_t[1]))
# println(cs.meanfield.sy(state_mfsym_t[2]))
# println(cs.meanfield.sy(state_mfsym_t[3]))
# println(cs.meanfield.sy(state_mfsym_t[4]))
# println(cs.meanfield.sy(state_mfsym_t[end]))
# @assert false
# Meanfield + Correlations
state0 = cs.mpc.blochstate(phi, theta)
tout, state_mpc_t = cs.mpc.timeevolution(T, system, state0)
# Quantum: master equation
sx_master = Float64[]
sy_master = Float64[]
sz_master = Float64[]
td_ind = Float64[]
td_mf = Float64[]
td_mpc = Float64[]
const Ncenter = 5#int(N/2)+1
embed(op::Operator) = QuantumOptics.embed(cs.quantum.basis(system), Ncenter, op)
function fout(t, rho::Operator)
i = something(findfirst(isequal(t), T), 0)
rho_ind = cs.independent.densityoperator(state_ind_t[i])
rho_mf = cs.meanfield.densityoperator(state_mf_t[i])
rho_mpc = cs.mpc.densityoperator(state_mpc_t[i])
push!(td_ind, tracedistance(rho, rho_ind))
push!(td_mf, tracedistance(rho, rho_mf))
push!(td_mpc, tracedistance(rho, rho_mpc))
push!(sx_master, real(expect(embed(sigmax), rho)))
push!(sy_master, real(expect(embed(sigmay), rho)))
push!(sz_master, real(expect(embed(sigmaz), rho)))
end
Ψ₀ = cs.quantum.blochstate(phi,theta)
ρ₀ = Ψ₀⊗dagger(Ψ₀)
cs.quantum.timeevolution(T, system, ρ₀, fout=fout)
# Expectation values
mapexpect(op, states) = map(s->(op(s)[Ncenter]), states)
sx_ind = mapexpect(cs.independent.sx, state_ind_t)
sy_ind = mapexpect(cs.independent.sy, state_ind_t)
sz_ind = mapexpect(cs.independent.sz, state_ind_t)
sx_mf = mapexpect(cs.meanfield.sx, state_mf_t)
sy_mf = mapexpect(cs.meanfield.sy, state_mf_t)
sz_mf = mapexpect(cs.meanfield.sz, state_mf_t)
sx_mpc = mapexpect(cs.mpc.sx, state_mpc_t)
sy_mpc = mapexpect(cs.mpc.sy, state_mpc_t)
sz_mpc = mapexpect(cs.mpc.sz, state_mpc_t)
mapexpect(op, states) = map(s->(op(s)[1]), states)
sx_mfsym = mapexpect(cs.meanfield.sx, state_mfsym_t)
sy_mfsym = mapexpect(cs.meanfield.sy, state_mfsym_t)
sz_mfsym = mapexpect(cs.meanfield.sz, state_mfsym_t)
# Save data
f = open(joinpath(targetdir, name), "w")
write(f, "# Time sx_ind sy_ind sz_ind sx_mf sy_mf sz_mf sx_mfsym sy_mfsym sz_mfsym sx_mpc sy_mpc sz_mpc td_ind td_mf td_mpc\n")
for i=1:length(T)
write(f, "$(T[i]);$(sx_ind[i]);$(sy_ind[i]);$(sz_ind[i]);$(sx_mf[i]);$(sy_mf[i]);$(sz_mf[i]);$(sx_mfsym[i]);$(sy_mfsym[i]);$(sz_mfsym[i]);$(sx_mpc[i]);$(sy_mpc[i]);$(sz_mpc[i]);$(sx_master[i]);$(sy_master[i]);$(sz_master[i]);$(td_ind[i]);$(td_mf[i]);$(td_mpc[i])\n")
end
close(f) |
proofpile-julia0005-42653 | {
"provenance": "014.jsonl.gz:242654"
} | #----------------------------------------------------------------------------------------------#
# Package Defaults #
#----------------------------------------------------------------------------------------------#
"""
`const DEF = Dict{Symbol,Any}(...)`\n
`EngThermBase` defaults\n
"""
const DEF = Dict{Symbol,Any}(
# --- Bases
:IB => MA, # Default Intensive Base
:EB => SY, # Default Extensive Base
:XB => EX, # Default Exactness Base
# --- Print formatting
:pprint => true, # Whether to pretty-print AMOUNTS
:showPrec => true, # Whether to Base.show the Precision of AMOUNTS
:showSigD => 5, # Significant digits for Base.show of AMOUNTS
)
export DEF
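# Usage sketch (hypothetical): downstream code can read or override a default, e.g.
#   DEF[:showSigD]          # ⇒ 5 significant digits for Base.show of AMOUNTS
#   DEF[:pprint] = false    # turn pretty-printing of AMOUNTS off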
|
proofpile-julia0005-42654 | {
"provenance": "014.jsonl.gz:242655"
} | # https://github.com/JuliaLang/Statistics.jl/pull/28
@static if VERSION < v"1.6.0-"
Statistics.middle(x::Number, y::Number) = x/2 + y/2
end
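# Sketch of what the shim provides on pre-1.6 releases:
#   Statistics.middle(1, 4) == 2.5    # the x/2 + y/2 form avoids intermediate overflow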
|
proofpile-julia0005-42655 | {
"provenance": "014.jsonl.gz:242656"
} |
using MLBase
using Base.Test
# standardize
X = rand(5, 8)
(Y, t) = standardize(X; center=false, scale=false)
@test isa(t, Standardize)
@test isempty(t.mean)
@test isempty(t.scale)
@test isequal(X, Y)
@test_approx_eq transform(t, X[:,1]) Y[:,1]
(Y, t) = standardize(X; center=false, scale=true)
@test isa(t, Standardize)
@test isempty(t.mean)
@test length(t.scale) == 5
s = sqrt(sum(abs2(X), 2) ./ (8 - 1))
@test_approx_eq Y X ./ s
@test_approx_eq transform(t, X[:,1]) Y[:,1]
(Y, t) = standardize(X; center=true, scale=false)
@test isa(t, Standardize)
@test length(t.mean) == 5
@test isempty(t.scale)
@test_approx_eq Y X .- mean(X, 2)
@test_approx_eq transform(t, X[:,1]) Y[:,1]
(Y, t) = standardize(X; center=true, scale=true)
@test isa(t, Standardize)
@test length(t.mean) == 5
@test length(t.scale) == 5
@test_approx_eq Y (X .- mean(X, 2)) ./ std(X, 2)
@test_approx_eq transform(t, X[:,1]) Y[:,1]
|
proofpile-julia0005-42656 | {
"provenance": "014.jsonl.gz:242657"
} | export edge_color, edge_chromatic_number
"""
`edge_color(G,k)` returns a proper `k`-edge coloring of `G` or
throws an error if one does not exist.
"""
function edge_color(G::SimpleGraph, k::Int)
err_msg = "This graph is not $k-edge colorable"
Delta = maximum(deg(G))
if k<Delta
error(err_msg)
end
M = max_matching(G) # this isn't that expensive
if NE(G) > length(M)*k
error(err_msg)
end
VV = vlist(G)
EE = elist(G)
n = NV(G)
m = NE(G)
VT = vertex_type(G)
ET = Tuple{VT,VT}
result = Dict{ET,Int}()
MOD = Model(with_optimizer(_SOLVER.Optimizer; _OPTS...))
@variable(MOD, x[EE,1:k], Bin)
# Every edge must have exactly one color
for ed in EE
@constraint(MOD, sum(x[ed,i] for i=1:k) == 1)
end
# Two edges incident with the same vertex must have different colors
for i=1:m-1
for j=i+1:m
e1 = EE[i]
e2 = EE[j]
meet = intersect(Set(e1), Set(e2))
if length(meet)>0
for t=1:k
@constraint(MOD, x[e1,t]+x[e2,t] <= 1)
end
end
end
end
optimize!(MOD)
status = Int(termination_status(MOD))
if status != 1
error(err_msg)
end
X = value.(x)
for ee in EE
for t=1:k
if X[ee,t] > 0
result[ee] = t
end
end
end
return result
end
"""
`edge_chromatic_number(G)` returns the edge chromatic number
of the graph `G`.
"""
function edge_chromatic_number(G::SimpleGraph)::Int
if cache_check(G,:edge_chromatic_number)
return cache_recall(G,:edge_chromatic_number)
end
d = maximum(deg(G))
try
f = edge_color(G,d)
cache_save(G, :edge_chromatic_number, d)
return d
catch
end
cache_save(G, :edge_chromatic_number, d+1)
return d+1
end
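# Usage sketch (assumes the SimpleGraphs `Cycle` constructor is available):
#   G = Cycle(5)                  # odd cycle: Δ = 2, but 3 colors are required
#   edge_chromatic_number(G)      # ⇒ 3 (by Vizing, the answer is always Δ or Δ+1)
#   d = edge_color(G, 3)          # Dict mapping each edge to a color in 1:3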
|
proofpile-julia0005-42657 | {
"provenance": "014.jsonl.gz:242658"
} | module TidalSedimentFluxes
using TidalFluxQuantities, TidalFluxCalibrations
export OBS3, AnalogLow, AnalogHigh, Turbidity, TSS
include("obs.jl")
include("tss.jl")
end # module
|
proofpile-julia0005-42658 | {
"provenance": "014.jsonl.gz:242659"
} | @symbol_func function V_loop(cur_reactor::AbstractReactor)
cur_V = R_P(cur_reactor)
cur_V *= cur_reactor.I_P
cur_V *= f_IN(cur_reactor)
cur_V *= 1e6
cur_V
end
|
proofpile-julia0005-42659 | {
"provenance": "014.jsonl.gz:242660"
} | @testset "Ambiguity detection" begin
@test isempty(Test.detect_ambiguities(StatsModels))
end
|
proofpile-julia0005-42660 | {
"provenance": "014.jsonl.gz:242661"
} | # This file was generated, do not modify it. # hide
r = report(mtm)
res = r.plotting
md = res.parameter_values[:,1]
mcw = res.parameter_values[:,2]
figure(figsize=(8,6))
tricontourf(md, mcw, res.measurements)
xlabel("Maximum tree depth", fontsize=14)
ylabel("Minimum child weight", fontsize=14)
xticks(3:2:10, fontsize=12)
yticks(fontsize=12)
savefig(joinpath(@OUTPUT, "EX-crabs-xgb-heatmap.svg")) # hide |
proofpile-julia0005-42661 | {
"provenance": "014.jsonl.gz:242662"
} | @testset "1.1.3.6 (g x)^m (a+b x^n)^p (c+d x^n)^q (e+f x^n)^r" begin
(A, B, a, b, c, d, e, m, n, p, q, x, ) = @variables A B a b c d e m n p q x
#= ::Package:: =#
#= ::Title:: =#
#=Integrands*of*the*form*(g*x)^m*(a+b*x^n)^p*(c+d*x^n)^q*(e+f*x^n)^r=#
#= ::Section::Closed:: =#
#=Integrands*of*the*form*(e*x)^m*(a+b*x^n)^p*(c+d*x^n)^q*(A+B*x^n)=#
#= ::Subsection::Closed:: =#
#=Integrands*of*the*form*(e*x)^m*(a+b*x^n)^p*(c+d*x^n)^q*(A+B*x^n)=#
#= ::Subsubsection::Closed:: =#
#=q>0=#
@test_int [(e*x)^m*(a + b*x^n)^3*(c + d*x^n)*(A + B*x^n), x, 12, (a^2*(3*A*b*c + a*B*c + a*A*d)*x^(1 + n)*(e*x)^m)/(1 + m + n) + (a*(3*A*b*(b*c + a*d) + a*B*(3*b*c + a*d))*x^(1 + 2*n)*(e*x)^m)/(1 + m + 2*n) + (b*(3*a*B*(b*c + a*d) + A*b*(b*c + 3*a*d))*x^(1 + 3*n)*(e*x)^m)/(1 + m + 3*n) + (b^2*(b*B*c + A*b*d + 3*a*B*d)*x^(1 + 4*n)*(e*x)^m)/(1 + m + 4*n) + (b^3*B*d*x^(1 + 5*n)*(e*x)^m)/(1 + m + 5*n) + (a^3*A*c*(e*x)^(1 + m))/(e*(1 + m))]
@test_int [(e*x)^m*(a + b*x^n)^2*(c + d*x^n)*(A + B*x^n), x, 10, (a*(2*A*b*c + a*B*c + a*A*d)*x^(1 + n)*(e*x)^m)/(1 + m + n) + ((a*B*(2*b*c + a*d) + A*b*(b*c + 2*a*d))*x^(1 + 2*n)*(e*x)^m)/(1 + m + 2*n) + (b*(b*B*c + A*b*d + 2*a*B*d)*x^(1 + 3*n)*(e*x)^m)/(1 + m + 3*n) + (b^2*B*d*x^(1 + 4*n)*(e*x)^m)/(1 + m + 4*n) + (a^2*A*c*(e*x)^(1 + m))/(e*(1 + m))]
@test_int [(e*x)^m*(a + b*x^n)^1*(c + d*x^n)*(A + B*x^n), x, 8, ((A*b*c + a*B*c + a*A*d)*x^(1 + n)*(e*x)^m)/(1 + m + n) + ((b*B*c + A*b*d + a*B*d)*x^(1 + 2*n)*(e*x)^m)/(1 + m + 2*n) + (b*B*d*x^(1 + 3*n)*(e*x)^m)/(1 + m + 3*n) + (a*A*c*(e*x)^(1 + m))/(e*(1 + m))]
@test_int [(e*x)^m*(a + b*x^n)^0*(c + d*x^n)*(A + B*x^n), x, 6, ((B*c + A*d)*x^(1 + n)*(e*x)^m)/(1 + m + n) + (B*d*x^(1 + 2*n)*(e*x)^m)/(1 + m + 2*n) + (A*c*(e*x)^(1 + m))/(e*(1 + m))]
@test_int [(e*x)^m/(a + b*x^n)^1*(c + d*x^n)*(A + B*x^n), x, 5, (B*d*x^(1 + n)*(e*x)^m)/(b*(1 + m + n)) + ((b*B*c + A*b*d - a*B*d)*(e*x)^(1 + m))/(b^2*e*(1 + m)) + ((A*b - a*B)*(b*c - a*d)*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((b*x^n)/a)))/(a*b^2*e*(1 + m))]
@test_int [(e*x)^m/(a + b*x^n)^2*(c + d*x^n)*(A + B*x^n), x, 3, -((d*(A*b*(1 + m) - a*B*(1 + m + n))*(e*x)^(1 + m))/(a*b^2*e*(1 + m)*n)) + ((A*b - a*B)*(e*x)^(1 + m)*(c + d*x^n))/(a*b*e*n*(a + b*x^n)) + ((b*c*(a*B*(1 + m) - A*b*(1 + m - n)) + a*d*(A*b*(1 + m) - a*B*(1 + m + n)))*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((b*x^n)/a)))/(a^2*b^2*e*(1 + m)*n)]
@test_int [(e*x)^m/(a + b*x^n)^3*(c + d*x^n)*(A + B*x^n), x, 3, -(((A*b*(b*c*(1 + m - 2*n) - a*d*(1 + m - n)) - a*B*(b*c*(1 + m) - a*d*(1 + m + n)))*(e*x)^(1 + m))/(2*a^2*b^2*e*n^2*(a + b*x^n))) + ((A*b - a*B)*(e*x)^(1 + m)*(c + d*x^n))/(2*a*b*e*n*(a + b*x^n)^2) - ((b*c*(a*B*(1 + m) - A*b*(1 + m - 2*n))*(1 + m - n) + a*d*(1 + m)*(A*b*(1 + m - n) - a*B*(1 + m + n)))*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((b*x^n)/a)))/(2*a^3*b^2*e*(1 + m)*n^2)]
@test_int [(e*x)^m*(a + b*x^n)^3*(c + d*x^n)^2*(A + B*x^n), x, 14, (a^2*c*(3*A*b*c + a*B*c + 2*a*A*d)*x^(1 + n)*(e*x)^m)/(1 + m + n) + (a*(a*B*c*(3*b*c + 2*a*d) + A*(3*b^2*c^2 + 6*a*b*c*d + a^2*d^2))*x^(1 + 2*n)*(e*x)^m)/(1 + m + 2*n) + ((a*B*(3*b^2*c^2 + 6*a*b*c*d + a^2*d^2) + A*b*(b^2*c^2 + 6*a*b*c*d + 3*a^2*d^2))*x^(1 + 3*n)*(e*x)^m)/(1 + m + 3*n) + (b*(3*a^2*B*d^2 + 3*a*b*d*(2*B*c + A*d) + b^2*c*(B*c + 2*A*d))*x^(1 + 4*n)*(e*x)^m)/(1 + m + 4*n) + (b^2*d*(2*b*B*c + A*b*d + 3*a*B*d)*x^(1 + 5*n)*(e*x)^m)/(1 + m + 5*n) + (b^3*B*d^2*x^(1 + 6*n)*(e*x)^m)/(1 + m + 6*n) + (a^3*A*c^2*(e*x)^(1 + m))/(e*(1 + m))]
@test_int [(e*x)^m*(a + b*x^n)^2*(c + d*x^n)^2*(A + B*x^n), x, 12, (a*c*(a*B*c + 2*A*(b*c + a*d))*x^(1 + n)*(e*x)^m)/(1 + m + n) + ((2*a*B*c*(b*c + a*d) + A*(b^2*c^2 + 4*a*b*c*d + a^2*d^2))*x^(1 + 2*n)*(e*x)^m)/(1 + m + 2*n) + ((a^2*B*d^2 + 2*a*b*d*(2*B*c + A*d) + b^2*c*(B*c + 2*A*d))*x^(1 + 3*n)*(e*x)^m)/(1 + m + 3*n) + (b*d*(2*b*B*c + A*b*d + 2*a*B*d)*x^(1 + 4*n)*(e*x)^m)/(1 + m + 4*n) + (b^2*B*d^2*x^(1 + 5*n)*(e*x)^m)/(1 + m + 5*n) + (a^2*A*c^2*(e*x)^(1 + m))/(e*(1 + m))]
@test_int [(e*x)^m*(a + b*x^n)^1*(c + d*x^n)^2*(A + B*x^n), x, 10, (c*(A*b*c + a*B*c + 2*a*A*d)*x^(1 + n)*(e*x)^m)/(1 + m + n) + ((a*d*(2*B*c + A*d) + b*c*(B*c + 2*A*d))*x^(1 + 2*n)*(e*x)^m)/(1 + m + 2*n) + (d*(2*b*B*c + A*b*d + a*B*d)*x^(1 + 3*n)*(e*x)^m)/(1 + m + 3*n) + (b*B*d^2*x^(1 + 4*n)*(e*x)^m)/(1 + m + 4*n) + (a*A*c^2*(e*x)^(1 + m))/(e*(1 + m))]
@test_int [(e*x)^m*(a + b*x^n)^0*(c + d*x^n)^2*(A + B*x^n), x, 8, (c*(B*c + 2*A*d)*x^(1 + n)*(e*x)^m)/(1 + m + n) + (d*(2*B*c + A*d)*x^(1 + 2*n)*(e*x)^m)/(1 + m + 2*n) + (B*d^2*x^(1 + 3*n)*(e*x)^m)/(1 + m + 3*n) + (A*c^2*(e*x)^(1 + m))/(e*(1 + m))]
@test_int [(e*x)^m/(a + b*x^n)^1*(c + d*x^n)^2*(A + B*x^n), x, 7, (d*(2*b*B*c + A*b*d - a*B*d)*x^(1 + n)*(e*x)^m)/(b^2*(1 + m + n)) + (B*d^2*x^(1 + 2*n)*(e*x)^m)/(b*(1 + m + 2*n)) + ((a^2*B*d^2 - a*b*d*(2*B*c + A*d) + b^2*c*(B*c + 2*A*d))*(e*x)^(1 + m))/(b^3*e*(1 + m)) + ((A*b - a*B)*(b*c - a*d)^2*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((b*x^n)/a)))/(a*b^3*e*(1 + m))]
@test_int [(e*x)^m/(a + b*x^n)^2*(c + d*x^n)^2*(A + B*x^n), x, 6, -((d^2*(A*b*(1 + m + n) - a*B*(1 + m + 2*n))*x^(1 + n)*(e*x)^m)/(a*b^2*n*(1 + m + n))) - (d*(A*b*(2*b*c*(1 + m) - a*d*(1 + m + n)) - a*B*(2*b*c*(1 + m + n) - a*d*(1 + m + 2*n)))*(e*x)^(1 + m))/(a*b^3*e*(1 + m)*n) + ((A*b - a*B)*(e*x)^(1 + m)*(c + d*x^n)^2)/(a*b*e*n*(a + b*x^n)) - ((b*c - a*d)*(A*b*(b*c*(1 + m - n) - a*d*(1 + m + n)) - a*B*(b*c*(1 + m) - a*d*(1 + m + 2*n)))*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((b*x^n)/a)))/(a^2*b^3*e*(1 + m)*n)]
@test_int [(e*x)^m/(a + b*x^n)^3*(c + d*x^n)^2*(A + B*x^n), x, 4, (d*(b*c*(1 + m) - a*d*(1 + m + n))*(A*b*(1 + m) - a*B*(1 + m + 2*n))*(e*x)^(1 + m))/(2*a^2*b^3*e*(1 + m)*n^2) + ((A*b - a*B)*(e*x)^(1 + m)*(c + d*x^n)^2)/(2*a*b*e*n*(a + b*x^n)^2) + ((b*c - a*d)*(e*x)^(1 + m)*(c*(a*B*(1 + m) - A*b*(1 + m - 2*n)) - d*(A*b*(1 + m) - a*B*(1 + m + 2*n))*x^n))/(2*a^2*b^2*e*n^2*(a + b*x^n)) + ((b*c*(a*B*(1 + m) - A*b*(1 + m - 2*n))*(a*d*(1 + m) - b*c*(1 + m - n)) - a*d*(b*c*(1 + m) - a*d*(1 + m + n))*(A*b*(1 + m) - a*B*(1 + m + 2*n)))*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((b*x^n)/a)))/(2*a^3*b^3*e*(1 + m)*n^2)]
@test_int [(e*x)^m*(a + b*x^n)^3*(c + d*x^n)^3*(A + B*x^n), x, 16, (a^2*c^2*(a*B*c + 3*A*(b*c + a*d))*x^(1 + n)*(e*x)^m)/(1 + m + n) + (3*a*c*(a*B*c*(b*c + a*d) + A*(b^2*c^2 + 3*a*b*c*d + a^2*d^2))*x^(1 + 2*n)*(e*x)^m)/(1 + m + 2*n) + ((3*a*B*c*(b^2*c^2 + 3*a*b*c*d + a^2*d^2) + A*(b^3*c^3 + 9*a*b^2*c^2*d + 9*a^2*b*c*d^2 + a^3*d^3))*x^(1 + 3*n)*(e*x)^m)/(1 + m + 3*n) + ((a^3*B*d^3 + 9*a*b^2*c*d*(B*c + A*d) + 3*a^2*b*d^2*(3*B*c + A*d) + b^3*c^2*(B*c + 3*A*d))*x^(1 + 4*n)*(e*x)^m)/(1 + m + 4*n) + (3*b*d*(a^2*B*d^2 + b^2*c*(B*c + A*d) + a*b*d*(3*B*c + A*d))*x^(1 + 5*n)*(e*x)^m)/(1 + m + 5*n) + (b^2*d^2*(3*b*B*c + A*b*d + 3*a*B*d)*x^(1 + 6*n)*(e*x)^m)/(1 + m + 6*n) + (b^3*B*d^3*x^(1 + 7*n)*(e*x)^m)/(1 + m + 7*n) + (a^3*A*c^3*(e*x)^(1 + m))/(e*(1 + m))]
@test_int [(e*x)^m*(a + b*x^n)^2*(c + d*x^n)^3*(A + B*x^n), x, 14, (a*c^2*(2*A*b*c + a*B*c + 3*a*A*d)*x^(1 + n)*(e*x)^m)/(1 + m + n) + (c*(a*B*c*(2*b*c + 3*a*d) + A*(b^2*c^2 + 6*a*b*c*d + 3*a^2*d^2))*x^(1 + 2*n)*(e*x)^m)/(1 + m + 2*n) + ((6*a*b*c*d*(B*c + A*d) + a^2*d^2*(3*B*c + A*d) + b^2*c^2*(B*c + 3*A*d))*x^(1 + 3*n)*(e*x)^m)/(1 + m + 3*n) + (d*(a^2*B*d^2 + 3*b^2*c*(B*c + A*d) + 2*a*b*d*(3*B*c + A*d))*x^(1 + 4*n)*(e*x)^m)/(1 + m + 4*n) + (b*d^2*(3*b*B*c + A*b*d + 2*a*B*d)*x^(1 + 5*n)*(e*x)^m)/(1 + m + 5*n) + (b^2*B*d^3*x^(1 + 6*n)*(e*x)^m)/(1 + m + 6*n) + (a^2*A*c^3*(e*x)^(1 + m))/(e*(1 + m))]
@test_int [(e*x)^m*(a + b*x^n)^1*(c + d*x^n)^3*(A + B*x^n), x, 12, (c^2*(A*b*c + a*B*c + 3*a*A*d)*x^(1 + n)*(e*x)^m)/(1 + m + n) + (c*(3*a*d*(B*c + A*d) + b*c*(B*c + 3*A*d))*x^(1 + 2*n)*(e*x)^m)/(1 + m + 2*n) + (d*(3*b*c*(B*c + A*d) + a*d*(3*B*c + A*d))*x^(1 + 3*n)*(e*x)^m)/(1 + m + 3*n) + (d^2*(3*b*B*c + A*b*d + a*B*d)*x^(1 + 4*n)*(e*x)^m)/(1 + m + 4*n) + (b*B*d^3*x^(1 + 5*n)*(e*x)^m)/(1 + m + 5*n) + (a*A*c^3*(e*x)^(1 + m))/(e*(1 + m))]
@test_int [(e*x)^m*(a + b*x^n)^0*(c + d*x^n)^3*(A + B*x^n), x, 10, (c^2*(B*c + 3*A*d)*x^(1 + n)*(e*x)^m)/(1 + m + n) + (3*c*d*(B*c + A*d)*x^(1 + 2*n)*(e*x)^m)/(1 + m + 2*n) + (d^2*(3*B*c + A*d)*x^(1 + 3*n)*(e*x)^m)/(1 + m + 3*n) + (B*d^3*x^(1 + 4*n)*(e*x)^m)/(1 + m + 4*n) + (A*c^3*(e*x)^(1 + m))/(e*(1 + m))]
@test_int [(e*x)^m/(a + b*x^n)^1*(c + d*x^n)^3*(A + B*x^n), x, 9, (d*(a^2*B*d^2 + 3*b^2*c*(B*c + A*d) - a*b*d*(3*B*c + A*d))*x^(1 + n)*(e*x)^m)/(b^3*(1 + m + n)) + (d^2*(3*b*B*c + A*b*d - a*B*d)*x^(1 + 2*n)*(e*x)^m)/(b^2*(1 + m + 2*n)) + (B*d^3*x^(1 + 3*n)*(e*x)^m)/(b*(1 + m + 3*n)) - ((a^3*B*d^3 + 3*a*b^2*c*d*(B*c + A*d) - a^2*b*d^2*(3*B*c + A*d) - b^3*c^2*(B*c + 3*A*d))*(e*x)^(1 + m))/(b^4*e*(1 + m)) + ((A*b - a*B)*(b*c - a*d)^3*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((b*x^n)/a)))/(a*b^4*e*(1 + m))]
@test_int [(e*x)^m/(a + b*x^n)^2*(c + d*x^n)^3*(A + B*x^n), x, 8, -((d^2*(A*b*(3*b*c*(1 + m + n) - a*d*(1 + m + 2*n)) - a*B*(3*b*c*(1 + m + 2*n) - a*d*(1 + m + 3*n)))*x^(1 + n)*(e*x)^m)/(a*b^3*n*(1 + m + n))) - (d^3*(A*b*(1 + m + 2*n) - a*B*(1 + m + 3*n))*x^(1 + 2*n)*(e*x)^m)/(a*b^2*n*(1 + m + 2*n)) - (d*(A*b*(3*b^2*c^2*(1 + m) - 3*a*b*c*d*(1 + m + n) + a^2*d^2*(1 + m + 2*n)) - a*B*(3*b^2*c^2*(1 + m + n) - 3*a*b*c*d*(1 + m + 2*n) + a^2*d^2*(1 + m + 3*n)))*(e*x)^(1 + m))/(a*b^4*e*(1 + m)*n) + ((A*b - a*B)*(e*x)^(1 + m)*(c + d*x^n)^3)/(a*b*e*n*(a + b*x^n)) - ((b*c - a*d)^2*(A*b*(b*c*(1 + m - n) - a*d*(1 + m + 2*n)) - a*B*(b*c*(1 + m) - a*d*(1 + m + 3*n)))*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((b*x^n)/a)))/(a^2*b^4*e*(1 + m)*n)]
#= [(e*x)^m/(a + b*x^n)^3*(c + d*x^n)^3*(A + B*x^n), x, 7, (B*d^3*x^(1 + n)*(e*x)^m)/(b^3*(1 + m + n)) + (d^2*(3*b*B*c + A*b*d - 3*a*B*d)*(e*x)^(1 + m))/(b^4*e*(1 + m)) + (3*d*(b*c - a*d)*(b*B*c + A*b*d - 2*a*B*d)*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((b*x^n)/a)))/(a*b^4*e*(1 + m)) + ((b*c - a*d)^2*(b*B*c + 3*A*b*d - 4*a*B*d)*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(2, (1 + m)/n, (1 + m + n)/n, -((b*x^n)/a)))/(a^2*b^4*e*(1 + m)) + ((A*b - a*B)*(b*c - a*d)^3*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(3, (1 + m)/n, (1 + m + n)/n, -((b*x^n)/a)))/(a^3*b^4*e*(1 + m))]=#
#= ::Subsubsection::Closed:: =#
#=q<0=#
@test_int [(e*x)^m*(a + b*x^n)^4*(A + B*x^n)/(c + d*x^n), x, 11, (b*(4*a^3*B*d^3 - b^3*c^2*(B*c - A*d) + 4*a*b^2*c*d*(B*c - A*d) - 6*a^2*b*d^2*(B*c - A*d))*x^(1 + n)*(e*x)^m)/(d^4*(1 + m + n)) + (b^2*(6*a^2*B*d^2 + b^2*c*(B*c - A*d) - 4*a*b*d*(B*c - A*d))*x^(1 + 2*n)*(e*x)^m)/(d^3*(1 + m + 2*n)) - (b^3*(b*B*c - A*b*d - 4*a*B*d)*x^(1 + 3*n)*(e*x)^m)/(d^2*(1 + m + 3*n)) + (b^4*B*x^(1 + 4*n)*(e*x)^m)/(d*(1 + m + 4*n)) + ((a^4*B*d^4 + b^4*c^3*(B*c - A*d) - 4*a*b^3*c^2*d*(B*c - A*d) + 6*a^2*b^2*c*d^2*(B*c - A*d) - 4*a^3*b*d^3*(B*c - A*d))*(e*x)^(1 + m))/(d^5*e*(1 + m)) - ((b*c - a*d)^4*(B*c - A*d)*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((d*x^n)/c)))/(c*d^5*e*(1 + m))]
@test_int [(e*x)^m*(a + b*x^n)^3*(A + B*x^n)/(c + d*x^n), x, 9, (b*(3*a^2*B*d^2 + b^2*c*(B*c - A*d) - 3*a*b*d*(B*c - A*d))*x^(1 + n)*(e*x)^m)/(d^3*(1 + m + n)) - (b^2*(b*B*c - A*b*d - 3*a*B*d)*x^(1 + 2*n)*(e*x)^m)/(d^2*(1 + m + 2*n)) + (b^3*B*x^(1 + 3*n)*(e*x)^m)/(d*(1 + m + 3*n)) + ((a^3*B*d^3 - b^3*c^2*(B*c - A*d) + 3*a*b^2*c*d*(B*c - A*d) - 3*a^2*b*d^2*(B*c - A*d))*(e*x)^(1 + m))/(d^4*e*(1 + m)) + ((b*c - a*d)^3*(B*c - A*d)*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((d*x^n)/c)))/(c*d^4*e*(1 + m))]
@test_int [(e*x)^m*(a + b*x^n)^2*(A + B*x^n)/(c + d*x^n), x, 7, -((b*(b*B*c - A*b*d - 2*a*B*d)*x^(1 + n)*(e*x)^m)/(d^2*(1 + m + n))) + (b^2*B*x^(1 + 2*n)*(e*x)^m)/(d*(1 + m + 2*n)) + ((a^2*B*d^2 + b^2*c*(B*c - A*d) - 2*a*b*d*(B*c - A*d))*(e*x)^(1 + m))/(d^3*e*(1 + m)) - ((b*c - a*d)^2*(B*c - A*d)*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((d*x^n)/c)))/(c*d^3*e*(1 + m))]
@test_int [(e*x)^m*(a + b*x^n)^1*(A + B*x^n)/(c + d*x^n), x, 5, (b*B*x^(1 + n)*(e*x)^m)/(d*(1 + m + n)) - ((b*B*c - A*b*d - a*B*d)*(e*x)^(1 + m))/(d^2*e*(1 + m)) + ((b*c - a*d)*(B*c - A*d)*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((d*x^n)/c)))/(c*d^2*e*(1 + m))]
@test_int [(e*x)^m*(a + b*x^n)^0*(A + B*x^n)/(c + d*x^n), x, 2, (B*(e*x)^(1 + m))/(d*e*(1 + m)) - ((B*c - A*d)*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((d*x^n)/c)))/(c*d*e*(1 + m))]
@test_int [(e*x)^m/(a + b*x^n)^1*(A + B*x^n)/(c + d*x^n), x, 4, ((A*b - a*B)*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((b*x^n)/a)))/(a*(b*c - a*d)*e*(1 + m)) + ((B*c - A*d)*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((d*x^n)/c)))/(c*(b*c - a*d)*e*(1 + m))]
@test_int [(e*x)^m/(a + b*x^n)^2*(A + B*x^n)/(c + d*x^n), x, 5, ((A*b - a*B)*(e*x)^(1 + m))/(a*(b*c - a*d)*e*n*(a + b*x^n)) + ((A*b*(a*d*(1 + m - 2*n) - b*c*(1 + m - n)) + a*B*(b*c*(1 + m) - a*d*(1 + m - n)))*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((b*x^n)/a)))/(a^2*(b*c - a*d)^2*e*(1 + m)*n) - (d*(B*c - A*d)*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((d*x^n)/c)))/(c*(b*c - a*d)^2*e*(1 + m))]
@test_int [(e*x)^m/(a + b*x^n)^3*(A + B*x^n)/(c + d*x^n), x, 6, ((A*b - a*B)*(e*x)^(1 + m))/(2*a*(b*c - a*d)*e*n*(a + b*x^n)^2) + ((A*b*(a*d*(1 + m - 4*n) - b*c*(1 + m - 2*n)) + a*B*(b*c*(1 + m) - a*d*(1 + m - 2*n)))*(e*x)^(1 + m))/(2*a^2*(b*c - a*d)^2*e*n^2*(a + b*x^n)) + ((a*B*(2*a*b*c*d*(1 + m)*(1 + m - 2*n) - b^2*c^2*(1 + m)*(1 + m - n) - a^2*d^2*(1 + m^2 + m*(2 - 3*n) - 3*n + 2*n^2)) + A*b*(b^2*c^2*(1 + m^2 + m*(2 - 3*n) - 3*n + 2*n^2) - 2*a*b*c*d*(1 + m^2 + m*(2 - 4*n) - 4*n + 3*n^2) + a^2*d^2*(1 + m^2 + m*(2 - 5*n) - 5*n + 6*n^2)))*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((b*x^n)/a)))/(2*a^3*(b*c - a*d)^3*e*(1 + m)*n^2) + (d^2*(B*c - A*d)*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((d*x^n)/c)))/(c*(b*c - a*d)^3*e*(1 + m))]
@test_int [(e*x)^m*(a + b*x^n)^3*(A + B*x^n)/(c + d*x^n)^2, x, 8, -((b^2*(3*a*d*(A*d*(1 + m + n) - B*c*(1 + m + 2*n)) - b*c*(A*d*(1 + m + 2*n) - B*c*(1 + m + 3*n)))*x^(1 + n)*(e*x)^m)/(c*d^3*n*(1 + m + n))) - (b^3*(A*d*(1 + m + 2*n) - B*c*(1 + m + 3*n))*x^(1 + 2*n)*(e*x)^m)/(c*d^2*n*(1 + m + 2*n)) - (b*(3*a^2*d^2*(A*d*(1 + m) - B*c*(1 + m + n)) - 3*a*b*c*d*(A*d*(1 + m + n) - B*c*(1 + m + 2*n)) + b^2*c^2*(A*d*(1 + m + 2*n) - B*c*(1 + m + 3*n)))*(e*x)^(1 + m))/(c*d^4*e*(1 + m)*n) - ((B*c - A*d)*(e*x)^(1 + m)*(a + b*x^n)^3)/(c*d*e*n*(c + d*x^n)) + ((b*c - a*d)^2*(a*d*(B*c*(1 + m) - A*d*(1 + m - n)) + b*c*(A*d*(1 + m + 2*n) - B*c*(1 + m + 3*n)))*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((d*x^n)/c)))/(c^2*d^4*e*(1 + m)*n)]
@test_int [(e*x)^m*(a + b*x^n)^2*(A + B*x^n)/(c + d*x^n)^2, x, 6, -((b^2*(A*d*(1 + m + n) - B*c*(1 + m + 2*n))*x^(1 + n)*(e*x)^m)/(c*d^2*n*(1 + m + n))) - (b*(2*a*d*(A*d*(1 + m) - B*c*(1 + m + n)) - b*c*(A*d*(1 + m + n) - B*c*(1 + m + 2*n)))*(e*x)^(1 + m))/(c*d^3*e*(1 + m)*n) - ((B*c - A*d)*(e*x)^(1 + m)*(a + b*x^n)^2)/(c*d*e*n*(c + d*x^n)) - ((b*c - a*d)*(a*d*(B*c*(1 + m) - A*d*(1 + m - n)) + b*c*(A*d*(1 + m + n) - B*c*(1 + m + 2*n)))*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((d*x^n)/c)))/(c^2*d^3*e*(1 + m)*n)]
@test_int [(e*x)^m*(a + b*x^n)^1*(A + B*x^n)/(c + d*x^n)^2, x, 3, -((B*(a*d*(1 + m) - b*c*(1 + m + n))*(e*x)^(1 + m))/(c*d^2*e*(1 + m)*n)) - ((b*c - a*d)*(e*x)^(1 + m)*(A + B*x^n))/(c*d*e*n*(c + d*x^n)) + ((A*d*(b*c*(1 + m) - a*d*(1 + m - n)) + B*c*(a*d*(1 + m) - b*c*(1 + m + n)))*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((d*x^n)/c)))/(c^2*d^2*e*(1 + m)*n)]
@test_int [(e*x)^m*(a + b*x^n)^0*(A + B*x^n)/(c + d*x^n)^2, x, 2, -(((B*c - A*d)*(e*x)^(1 + m))/(c*d*e*n*(c + d*x^n))) + ((B*c*(1 + m) - A*d*(1 + m - n))*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((d*x^n)/c)))/(c^2*d*e*(1 + m)*n)]
@test_int [(e*x)^m/(a + b*x^n)^1*(A + B*x^n)/(c + d*x^n)^2, x, 5, ((B*c - A*d)*(e*x)^(1 + m))/(c*(b*c - a*d)*e*n*(c + d*x^n)) + (b*(A*b - a*B)*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((b*x^n)/a)))/(a*(b*c - a*d)^2*e*(1 + m)) + ((b*c*(A*d*(1 + m - 2*n) - B*c*(1 + m - n)) + a*d*(B*c*(1 + m) - A*d*(1 + m - n)))*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((d*x^n)/c)))/(c^2*(b*c - a*d)^2*e*(1 + m)*n)]
@test_int [(e*x)^m/(a + b*x^n)^2*(A + B*x^n)/(c + d*x^n)^2, x, 6, (d*(A*b*c - 2*a*B*c + a*A*d)*(e*x)^(1 + m))/(a*c*(b*c - a*d)^2*e*n*(c + d*x^n)) + ((A*b - a*B)*(e*x)^(1 + m))/(a*(b*c - a*d)*e*n*(a + b*x^n)*(c + d*x^n)) + (b*(a*B*(b*c*(1 + m) - a*d*(1 + m - 2*n)) + A*b*(a*d*(1 + m - 3*n) - b*c*(1 + m - n)))*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((b*x^n)/a)))/(a^2*(b*c - a*d)^3*e*(1 + m)*n) - (d*(b*c*(A*d*(1 + m - 3*n) - B*c*(1 + m - 2*n)) + a*d*(B*c*(1 + m) - A*d*(1 + m - n)))*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((d*x^n)/c)))/(c^2*(b*c - a*d)^3*e*(1 + m)*n)]
@test_int [(e*x)^m/(a + b*x^n)^3*(A + B*x^n)/(c + d*x^n)^2, x, 7, (d*(a*B*c*(b*c*(1 + m) - a*d*(1 + m - 6*n)) + A*(a*b*c*d*(1 + m - 6*n) - b^2*c^2*(1 + m - 2*n) - 2*a^2*d^2*n))*(e*x)^(1 + m))/(2*a^2*c*(b*c - a*d)^3*e*n^2*(c + d*x^n)) + ((A*b - a*B)*(e*x)^(1 + m))/(2*a*(b*c - a*d)*e*n*(a + b*x^n)^2*(c + d*x^n)) + ((a*B*(b*c*(1 + m) - a*d*(1 + m - 3*n)) + A*b*(a*d*(1 + m - 5*n) - b*c*(1 + m - 2*n)))*(e*x)^(1 + m))/(2*a^2*(b*c - a*d)^2*e*n^2*(a + b*x^n)*(c + d*x^n)) + (b*(a*B*(2*a*b*c*d*(1 + m)*(1 + m - 3*n) - b^2*c^2*(1 + m)*(1 + m - n) - a^2*d^2*(1 + m^2 + m*(2 - 5*n) - 5*n + 6*n^2)) + A*b*(b^2*c^2*(1 + m^2 + m*(2 - 3*n) - 3*n + 2*n^2) - 2*a*b*c*d*(1 + m^2 + m*(2 - 5*n) - 5*n + 4*n^2) + a^2*d^2*(1 + m^2 + m*(2 - 7*n) - 7*n + 12*n^2)))*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((b*x^n)/a)))/(2*a^3*(b*c - a*d)^4*e*(1 + m)*n^2) + (d^2*(b*c*(A*d*(1 + m - 4*n) - B*c*(1 + m - 3*n)) + a*d*(B*c*(1 + m) - A*d*(1 + m - n)))*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((d*x^n)/c)))/(c^2*(b*c - a*d)^4*e*(1 + m)*n)]
#= [(e*x)^m*(a + b*x^n)^3*(A + B*x^n)/(c + d*x^n)^3, x, 7, (b^3*B*x^(1 + n)*(e*x)^m)/(d^3*(1 + m + n)) - (b^2*(3*b*B*c - A*b*d - 3*a*B*d)*(e*x)^(1 + m))/(d^4*e*(1 + m)) + (3*b*(b*c - a*d)*(2*b*B*c - A*b*d - a*B*d)*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((d*x^n)/c)))/(c*d^4*e*(1 + m)) - ((b*c - a*d)^2*(4*b*B*c - 3*A*b*d - a*B*d)*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(2, (1 + m)/n, (1 + m + n)/n, -((d*x^n)/c)))/(c^2*d^4*e*(1 + m)) + ((b*c - a*d)^3*(B*c - A*d)*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(3, (1 + m)/n, (1 + m + n)/n, -((d*x^n)/c)))/(c^3*d^4*e*(1 + m))] =#
@test_int [(e*x)^m*(a + b*x^n)^2*(A + B*x^n)/(c + d*x^n)^3, x, 4, (b*(a*d*(1 + m) - b*c*(1 + m + n))*(A*d*(1 + m) - B*c*(1 + m + 2*n))*(e*x)^(1 + m))/(2*c^2*d^3*e*(1 + m)*n^2) - ((B*c - A*d)*(e*x)^(1 + m)*(a + b*x^n)^2)/(2*c*d*e*n*(c + d*x^n)^2) - ((b*c - a*d)*(e*x)^(1 + m)*(a*(B*c*(1 + m) - A*d*(1 + m - 2*n)) - b*(A*d*(1 + m) - B*c*(1 + m + 2*n))*x^n))/(2*c^2*d^2*e*n^2*(c + d*x^n)) + ((a*d*(B*c*(1 + m) - A*d*(1 + m - 2*n))*(b*c*(1 + m) - a*d*(1 + m - n)) - b*c*(a*d*(1 + m) - b*c*(1 + m + n))*(A*d*(1 + m) - B*c*(1 + m + 2*n)))*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((d*x^n)/c)))/(2*c^3*d^3*e*(1 + m)*n^2)]
@test_int [(e*x)^m*(a + b*x^n)^1*(A + B*x^n)/(c + d*x^n)^3, x, 3, -(((b*c - a*d)*(e*x)^(1 + m)*(A + B*x^n))/(2*c*d*e*n*(c + d*x^n)^2)) - ((a*d*(A*d*(1 + m - 2*n) - B*c*(1 + m - n)) - b*c*(A*d*(1 + m) - B*c*(1 + m + n)))*(e*x)^(1 + m))/(2*c^2*d^2*e*n^2*(c + d*x^n)) - ((A*d*(b*c*(1 + m) - a*d*(1 + m - 2*n))*(1 + m - n) + B*c*(1 + m)*(a*d*(1 + m - n) - b*c*(1 + m + n)))*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((d*x^n)/c)))/(2*c^3*d^2*e*(1 + m)*n^2)]
@test_int [(e*x)^m*(a + b*x^n)^0*(A + B*x^n)/(c + d*x^n)^3, x, 2, -(((B*c - A*d)*(e*x)^(1 + m))/(2*c*d*e*n*(c + d*x^n)^2)) + ((B*c*(1 + m) - A*d*(1 + m - 2*n))*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(2, (1 + m)/n, (1 + m + n)/n, -((d*x^n)/c)))/(2*c^3*d*e*(1 + m)*n)]
@test_int [(e*x)^m/(a + b*x^n)^1*(A + B*x^n)/(c + d*x^n)^3, x, 6, ((B*c - A*d)*(e*x)^(1 + m))/(2*c*(b*c - a*d)*e*n*(c + d*x^n)^2) + ((b*c*(A*d*(1 + m - 4*n) - B*c*(1 + m - 2*n)) + a*d*(B*c*(1 + m) - A*d*(1 + m - 2*n)))*(e*x)^(1 + m))/(2*c^2*(b*c - a*d)^2*e*n^2*(c + d*x^n)) + (b^2*(A*b - a*B)*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((b*x^n)/a)))/(a*(b*c - a*d)^3*e*(1 + m)) - ((b^2*c^2*(A*d*(1 + m - 3*n) - B*c*(1 + m - n))*(1 + m - 2*n) - a^2*d^2*(B*c*(1 + m) - A*d*(1 + m - 2*n))*(1 + m - n) + 2*a*b*c*d*(B*c*(1 + m)*(1 + m - 2*n) - A*d*(1 + m^2 + m*(2 - 4*n) - 4*n + 3*n^2)))*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((d*x^n)/c)))/(2*c^3*(b*c - a*d)^3*e*(1 + m)*n^2)]
@test_int [(e*x)^m/(a + b*x^n)^2*(A + B*x^n)/(c + d*x^n)^3, x, 7, (d*(2*A*b*c - 3*a*B*c + a*A*d)*(e*x)^(1 + m))/(2*a*c*(b*c - a*d)^2*e*n*(c + d*x^n)^2) + ((A*b - a*B)*(e*x)^(1 + m))/(a*(b*c - a*d)*e*n*(a + b*x^n)*(c + d*x^n)^2) - (d*(a^2*d*(B*c*(1 + m) - A*d*(1 + m - 2*n)) - a*b*c*(B*c - A*d)*(1 + m - 6*n) - 2*A*b^2*c^2*n)*(e*x)^(1 + m))/(2*a*c^2*(b*c - a*d)^3*e*n^2*(c + d*x^n)) + (b^2*(a*B*(b*c*(1 + m) - a*d*(1 + m - 3*n)) + A*b*(a*d*(1 + m - 4*n) - b*c*(1 + m - n)))*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((b*x^n)/a)))/(a^2*(b*c - a*d)^4*e*(1 + m)*n) + (d*(b^2*c^2*(A*d*(1 + m - 4*n) - B*c*(1 + m - 2*n))*(1 + m - 3*n) - a^2*d^2*(B*c*(1 + m) - A*d*(1 + m - 2*n))*(1 + m - n) + 2*a*b*c*d*(B*c*(1 + m)*(1 + m - 3*n) - A*d*(1 + m^2 + m*(2 - 5*n) - 5*n + 4*n^2)))*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((d*x^n)/c)))/(2*c^3*(b*c - a*d)^4*e*(1 + m)*n^2)]
#= [(e*x)^m/(a + b*x^n)^3*(A + B*x^n)/(c + d*x^n)^3, x, 8, -((3*b^2*d*(b*B*c - 2*A*b*d + a*B*d)*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((b*x^n)/a)))/(a*(b*c - a*d)^5*e*(1 + m))) + (3*b*d^2*(b*B*c - 2*A*b*d + a*B*d)*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(1, (1 + m)/n, (1 + m + n)/n, -((d*x^n)/c)))/(c*(b*c - a*d)^5*e*(1 + m)) + (b^2*(b*B*c - 3*A*b*d + 2*a*B*d)*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(2, (1 + m)/n, (1 + m + n)/n, -((b*x^n)/a)))/(a^2*(b*c - a*d)^4*e*(1 + m)) + (d^2*(2*b*B*c - 3*A*b*d + a*B*d)*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(2, (1 + m)/n, (1 + m + n)/n, -((d*x^n)/c)))/(c^2*(b*c - a*d)^4*e*(1 + m)) + (b^2*(A*b - a*B)*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(3, (1 + m)/n, (1 + m + n)/n, -((b*x^n)/a)))/(a^3*(b*c - a*d)^3*e*(1 + m)) + (d^2*(B*c - A*d)*(e*x)^(1 + m)*HypergeometricFunctions._₂F₁(3, (1 + m)/n, (1 + m + n)/n, -((d*x^n)/c)))/(c^3*(b*c - a*d)^3*e*(1 + m))] =#
#= ::Subsection::Closed:: =#
#=Integrands*of*the*form*(e*x)^m*(a+b*x^n)^p*(c+d*x^n)^q*(A+B*x^n)*with*p*symbolic=#
@test_int [(e*x)^m*(a + b*x^n)^p*(c + d*x^n)^q*(A + B*x^n), x, 7, (A*(e*x)^(1 + m)*(a + b*x^n)^p*(c + d*x^n)^q*AppellF1((1 + m)/n, -p, -q, (1 + m + n)/n, -((b*x^n)/a), -((d*x^n)/c)))/((1 + (b*x^n)/a)^p*(1 + (d*x^n)/c)^q*(e*(1 + m))) + (B*x^(1 + n)*(e*x)^m*(a + b*x^n)^p*(c + d*x^n)^q*AppellF1((1 + m + n)/n, -p, -q, (1 + m + 2*n)/n, -((b*x^n)/a), -((d*x^n)/c)))/((1 + (b*x^n)/a)^p*(1 + (d*x^n)/c)^q*(1 + m + n))]
#= [(e*x)^m*(a + b*x^n)^p*(c + d*x^n)^3*(A + B*x^n), x, 11, (A*c^3*(e*x)^(1 + m)*(a + b*x^n)^p*HypergeometricFunctions._₂F₁((1 + m)/n, -p, (1 + m + n)/n, -((b*x^n)/a)))/((1 + (b*x^n)/a)^p*(e*(1 + m))) + (c^2*(B*c + 3*A*d)*x^(1 + n)*(e*x)^m*(a + b*x^n)^p*HypergeometricFunctions._₂F₁((1 + m + n)/n, -p, (1 + m + 2*n)/n, -((b*x^n)/a)))/((1 + (b*x^n)/a)^p*(1 + m + n)) + (3*c*d*(B*c + A*d)*x^(1 + 2*n)*(e*x)^m*(a + b*x^n)^p*HypergeometricFunctions._₂F₁((1 + m + 2*n)/n, -p, (1 + m + 3*n)/n, -((b*x^n)/a)))/((1 + (b*x^n)/a)^p*(1 + m + 2*n)) + (d^2*(3*B*c + A*d)*x^(1 + 3*n)*(e*x)^m*(a + b*x^n)^p*HypergeometricFunctions._₂F₁((1 + m + 3*n)/n, -p, (1 + m + 4*n)/n, -((b*x^n)/a)))/((1 + (b*x^n)/a)^p*(1 + m + 3*n)) + (B*d^3*x^(1 + 4*n)*(e*x)^m*(a + b*x^n)^p*HypergeometricFunctions._₂F₁((1 + m + 4*n)/n, -p, (1 + m + 5*n)/n, -((b*x^n)/a)))/((1 + (b*x^n)/a)^p*(1 + m + 4*n))] =#
#= [(e*x)^m*(a + b*x^n)^p*(c + d*x^n)^2*(A + B*x^n), x, 9, (A*c^2*(e*x)^(1 + m)*(a + b*x^n)^p*HypergeometricFunctions._₂F₁((1 + m)/n, -p, (1 + m + n)/n, -((b*x^n)/a)))/((1 + (b*x^n)/a)^p*(e*(1 + m))) + (c*(B*c + 2*A*d)*x^(1 + n)*(e*x)^m*(a + b*x^n)^p*HypergeometricFunctions._₂F₁((1 + m + n)/n, -p, (1 + m + 2*n)/n, -((b*x^n)/a)))/((1 + (b*x^n)/a)^p*(1 + m + n)) + (d*(2*B*c + A*d)*x^(1 + 2*n)*(e*x)^m*(a + b*x^n)^p*HypergeometricFunctions._₂F₁((1 + m + 2*n)/n, -p, (1 + m + 3*n)/n, -((b*x^n)/a)))/((1 + (b*x^n)/a)^p*(1 + m + 2*n)) + (B*d^2*x^(1 + 3*n)*(e*x)^m*(a + b*x^n)^p*HypergeometricFunctions._₂F₁((1 + m + 3*n)/n, -p, (1 + m + 4*n)/n, -((b*x^n)/a)))/((1 + (b*x^n)/a)^p*(1 + m + 3*n))] =#
@test_int [(e*x)^m*(a + b*x^n)^p*(c + d*x^n)*(A + B*x^n), x, 4, -(((a*B*d*(1 + m + n) - b*(A*d*n + B*c*(1 + m + n*(2 + p))))*(e*x)^(1 + m)*(a + b*x^n)^(1 + p))/(b^2*e*(1 + m + n + n*p)*(1 + m + n*(2 + p)))) + (d*(e*x)^(1 + m)*(a + b*x^n)^(1 + p)*(A + B*x^n))/(b*e*(1 + m + n*(2 + p))) - ((A*b*(1 + m + n + n*p)*(a*d*(1 + m) - b*c*(1 + m + n*(2 + p))) - a*(1 + m)*(a*B*d*(1 + m + n) - b*(A*d*n + B*c*(1 + m + n*(2 + p)))))*(e*x)^(1 + m)*(a + b*x^n)^p*HypergeometricFunctions._₂F₁((1 + m)/n, -p, (1 + m + n)/n, -((b*x^n)/a)))/((1 + (b*x^n)/a)^p*(b^2*e*(1 + m)*(1 + m + n + n*p)*(1 + m + n*(2 + p))))]
@test_int [(e*x)^m*(a + b*x^n)^p*(A + B*x^n)/(c + d*x^n), x, 6, -(((B*c - A*d)*(e*x)^(1 + m)*(a + b*x^n)^p*AppellF1((1 + m)/n, -p, 1, (1 + m + n)/n, -((b*x^n)/a), -((d*x^n)/c)))/((1 + (b*x^n)/a)^p*(c*d*e*(1 + m)))) + (B*(e*x)^(1 + m)*(a + b*x^n)^p*HypergeometricFunctions._₂F₁((1 + m)/n, -p, (1 + m + n)/n, -((b*x^n)/a)))/((1 + (b*x^n)/a)^p*(d*e*(1 + m)))]
@test_int [(e*x)^m*(a + b*x^n)^p*(A + B*x^n)/(c + d*x^n)^2, x, 7, ((B*c - A*d)*(e*x)^(1 + m)*(a + b*x^n)^(1 + p))/(c*(b*c - a*d)*e*n*(c + d*x^n)) - ((a*d*(B*c*(1 + m) - A*d*(1 + m - n)) + b*c*(A*d*(1 + m - n*(1 - p)) - B*c*(1 + m + n*p)))*(e*x)^(1 + m)*(a + b*x^n)^p*AppellF1((1 + m)/n, -p, 1, (1 + m + n)/n, -((b*x^n)/a), -((d*x^n)/c)))/((1 + (b*x^n)/a)^p*(c^2*d*(b*c - a*d)*e*(1 + m)*n)) - (b*(B*c - A*d)*(1 + m + n*p)*(e*x)^(1 + m)*(a + b*x^n)^p*HypergeometricFunctions._₂F₁((1 + m)/n, -p, (1 + m + n)/n, -((b*x^n)/a)))/((1 + (b*x^n)/a)^p*(c*d*(b*c - a*d)*e*(1 + m)*n))]
#= [(e*x)^m*(a + b*x^n)^p*(A + B*x^n)/(c + d*x^n)^3, x, 4, (B*(e*x)^(1 + m)*(a + b*x^n)^p*(1 + (d*x^n)/c)^2*AppellF1((1 + m)/n, -p, 2, (1 + m + n)/n, -((b*x^n)/a), -((d*x^n)/c)))/((1 + (b*x^n)/a)^p*(d*e*(1 + m)*(c + d*x^n)^2)) - ((B*c - A*d)*(e*x)^(1 + m)*(a + b*x^n)^p*(1 + (d*x^n)/c)^3*AppellF1((1 + m)/n, -p, 3, (1 + m + n)/n, -((b*x^n)/a), -((d*x^n)/c)))/((1 + (b*x^n)/a)^p*(d*e*(1 + m)*(c + d*x^n)^3))] =#
#= ::Title:: =#
#=Integrands*of*the*form*(g*x)^m*(a+b*x^(n/2)^p*(c+d*x^(n/2))^p*(e+f*x^n)^r*with*b*c+a*d=0=#
#= ::Section::Closed:: =#
#=Integrands*of*the*form*x^m*(a+b*x^(n/2)^p*(c+d*x^(n/2))^p*(e+f*x^n)^r*with*b*c+a*d=0=#
@test_int [((-a + b*x^(n/2))^(-1 + 1/n)*(a + b*x^(n/2))^(-1 + 1/n)*(c + d*x^n))/x^2, x, 4, ((c/a^2 + d/b^2)*(-a + b*x^(n/2))^(1/n)*(a + b*x^(n/2))^(1/n))/x - (d*(-a + b*x^(n/2))^(1/n)*(a + b*x^(n/2))^(1/n)*HypergeometricFunctions._₂F₁(-(1/n), -(1/n), -((1 - n)/n), (b^2*x^n)/a^2))/((1 - (b^2*x^n)/a^2)^n^(-1)*(b^2*x))]
@test_int [((-a + b*x^(n/2))^((1 - n)/n)*(a + b*x^(n/2))^((1 - n)/n)*(c + d*x^n))/x^2, x, 4, ((c/a^2 + d/b^2)*(-a + b*x^(n/2))^(1/n)*(a + b*x^(n/2))^(1/n))/x - (d*(-a + b*x^(n/2))^(1/n)*(a + b*x^(n/2))^(1/n)*HypergeometricFunctions._₂F₁(-(1/n), -(1/n), -((1 - n)/n), (b^2*x^n)/a^2))/((1 - (b^2*x^n)/a^2)^n^(-1)*(b^2*x)), -(((c/a^2 + d/b^2)*(-a + b*x^(n/2))^(-1 + 1/n)*(a + b*x^(n/2))^(-1 + 1/n)*(a^2 - b^2*x^n))/x) + (a^2*d*(-a + b*x^(n/2))^(-1 + 1/n)*(a + b*x^(n/2))^(-1 + 1/n)*HypergeometricFunctions._₂F₁(-(1/n), -(1/n), -((1 - n)/n), (b^2*x^n)/a^2))/((1 - (b^2*x^n)/a^2)^((1 - n)/n)*(b^2*x))]
end
|
proofpile-julia0005-42662 | {
"provenance": "014.jsonl.gz:242663"
} |
"""
CartesianAxes
Alias for LinearIndices where indices are subtypes of `AbstractAxis`.
## Examples
```jldoctest
julia> using AxisIndices
julia> cartaxes = CartesianAxes((Axis(2.0:5.0), Axis(1:4)));
julia> cartinds = CartesianIndices((1:4, 1:4));
julia> cartaxes[2, 2]
CartesianIndex(2, 2)
julia> cartinds[2, 2]
CartesianIndex(2, 2)
```
"""
const CartesianAxes{N,R<:Tuple{Vararg{<:AbstractAxis,N}}} = CartesianIndices{N,R}
function CartesianAxes(axs::Tuple{Vararg{Any,N}}) where {N}
return CartesianIndices(map(axis -> compose_axis(axis, _inds(axis)), axs))
end
# compose_axis(axis, checks) doesn't assume one-based indexing in case a range is
# passed without it, but here we want to assume one-based indexing unless an explicit
# instance of AbstractAxis is passed.
_inds(axis::Integer) = One():axis
_inds(axis::AbstractVector) = One():static_length(axis)
_inds(axis::AbstractAxis) = parent(axis)
Base.axes(A::CartesianAxes) = getfield(A, :indices)
@propagate_inbounds function Base.getindex(A::CartesianIndices{N,R}, args::Vararg{Int, N}) where {N,R<:Tuple{Vararg{<:AbstractAxis,N}}}
return ArrayInterface.getindex(A, args...)
end
@propagate_inbounds function Base.getindex(A::CartesianIndices{N,R}, args...) where {N,R<:Tuple{Vararg{<:AbstractAxis,N}}}
return ArrayInterface.getindex(A, args...)
end
Base.getindex(A::CartesianIndices{N,R}, ::Ellipsis) where {N,R<:Tuple{Vararg{<:AbstractAxis,N}}} = A
"""
LinearAxes
Alias for LinearIndices where indices are subtypes of `AbstractAxis`.
## Examples
```jldoctest
julia> using AxisIndices
julia> linaxes = LinearAxes((Axis(2.0:5.0), Axis(1:4)));
julia> lininds = LinearIndices((1:4, 1:4));
julia> linaxes[2, 2]
6
julia> lininds[2, 2]
6
```
"""
const LinearAxes{N,R<:Tuple{Vararg{<:AbstractAxis,N}}} = LinearIndices{N,R}
function LinearAxes(axs::Tuple{Vararg{<:Any,N}}) where {N}
return LinearIndices(map(axis -> compose_axis(axis, _inds(axis)), axs))
end
Base.axes(A::LinearAxes) = getfield(A, :indices)
@boundscheck function Base.getindex(iter::LinearAxes, i::Int)
@boundscheck if !in(i, eachindex(iter))
throw(BoundsError(iter, i))
end
return i
end
@propagate_inbounds function Base.getindex(A::LinearAxes, inds...)
return ArrayInterface.getindex(A, inds...)
#return Base._getindex(IndexStyle(A), A, to_indices(A, Tuple(inds))...)
end
@propagate_inbounds function Base.getindex(A::LinearAxes, i::AbstractRange{I}) where {I<:Integer}
return getindex(eachindex(A), i)
end
Base.getindex(A::LinearAxes, ::Ellipsis) = A
Base.eachindex(A::LinearAxes) = SimpleAxis(StaticInt(1):static_length(A))
|
proofpile-julia0005-42663 | {
"provenance": "014.jsonl.gz:242664"
} | # Check arguments for power calculation
function check_args_power{T <: TrialTest}(
test::T;
n::Union{Real, Void} = nothing,
std::Union{Real, Tuple{Real, Real}, Void} = nothing,
alpha::Real = 0.05,
side::String = "two",
)
if isa(n, Real)
if !(2.0 <= n < Inf)
error("sample size must be in [2.0, Inf)")
end # end if
else
error("sample size must be specified")
end # end if
if isa(test, Union{OneSampleProp, OneSamplePropInferior, OneSamplePropSuperior,
OneSamplePropEqual, TwoSampleProp, TwoSamplePropInferior,
TwoSamplePropSuperior, TwoSamplePropEqual, McNemarProp})
if !isa(std, Void)
warn("standard deviation is not used in the calculations")
end # end if
elseif isa(std, Real)
if !(0.0 < std < Inf)
error("standard deviation must be in (0.0, Inf)")
end # end if
elseif isa(std, Tuple{Real, Real})
if !((min(std...) > 0.0) & (max(std...) < Inf))
error("standard deviaitons must both be in (0.0, Inf)")
end # end if
else
error("standard deviaiton must be specified")
end # end if
# The alpha should between 0 and 1
if !(0.0 < alpha < 1.0)
error("type I error rate must be in (0.0, 1.0)")
end # end if
# The test should always be two-sided or one-sided
if !in(side, ("two", "one"))
error("side must be in 'two' or 'one'")
end # end if
end # end function
# Check arguments for sample size calculations
function check_args_sample_size{T <: TrialTest}(
test::T;
power::Union{Real, Void} = nothing,
std::Union{Real, Tuple{Real, Real}, Void} = nothing,
alpha::Real = 0.05,
side::String = "two",
)
if isa(power, Real)
# The power should be between 0 and 1
if !(0.0 < power < 1.0)
error("power must be in (0.0, 1.0)")
end # end if
else
error("power must be specified")
end # end if
if isa(test, Union{OneSampleProp, OneSamplePropInferior, OneSamplePropSuperior,
OneSamplePropEqual, TwoSampleProp, TwoSamplePropInferior,
TwoSamplePropSuperior, TwoSamplePropEqual, McNemarProp})
if !isa(std, Void)
warn("standard deviation is not used in the calculations")
end # end if
elseif isa(std, Real)
if !(0.0 < std < Inf)
error("standard deviation must be in (0.0, Inf)")
end # end if
elseif isa(std, Tuple{Real, Real})
if !((min(std...) > 0.0) & (max(std...) < Inf))
error("standard deviaitons must both be in (0.0, Inf)")
end # end if
else
error("standard deviaiton must be specified")
end # end if
# The alpha should between 0 and 1
if !(0.0 < alpha < 1.0)
error("type I error rate must be in (0.0, 1.0)")
end # end if
# The test should always be two-sided or one-sided
if !in(side, ("two", "one"))
error("side must be in 'two' or 'one'")
end # end if
end # end function
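# Hypothetical call sketch, for some concrete `test::TrialTest` (e.g. a OneSampleProp):
#   check_args_power(test; n = 50.0, alpha = 0.05, side = "one")
#   check_args_sample_size(test; power = 0.8, alpha = 0.05, side = "two")
# Both throw for out-of-range arguments and warn if `std` is supplied for a
# proportion-type test.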
|
proofpile-julia0005-42664 | {
"provenance": "014.jsonl.gz:242665"
} | """
```
normals(vertices::AbstractVector{Point{3, VT}},
        faces::AbstractVector{F},
        NT = Normal{3, VT}) where {VT, F <: Face}
```
Compute all vertex normals.
"""
function normals(
vertices::AbstractVector{Point{3, VT}},
faces::AbstractVector{F},
NT = Normal{3, VT}
) where {VT, F <: Face}
normals_result = zeros(Point{3, VT}, length(vertices)) # initialize with the same element type as the vertices, filled with zeros
for face in faces
v = vertices[face]
# we can get away with two edges since faces are planar.
a = v[2] - v[1]
b = v[3] - v[1]
n = cross(a, b)
for i =1:length(F)
fi = face[i]
normals_result[fi] = normals_result[fi] + n
end
end
normals_result .= NT.(normalize.(normals_result))
normals_result
end
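# Minimal sketch (assumes GeometryTypes-style Point/Face/Normal types):
#   verts = Point{3,Float64}[(0, 0, 0), (1, 0, 0), (0, 1, 0)]
#   fcs   = [Face{3,Int}(1, 2, 3)]
#   normals(verts, fcs)    # ⇒ three normals, each ≈ Normal(0.0, 0.0, 1.0)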
"""
Slice an AbstractMesh at the specified Z axis value.
Returns a Vector of LineSegments generated from the faces at the specified
heights. Note: This will not slice in-plane faces.
"""
function slice(mesh::AbstractMesh{Point{3,VT}, Face{3, FT}}, height::Number) where {VT<:AbstractFloat,FT<:Integer}
height_ct = length(height)
# intialize the LineSegment array
slice = Simplex{2,Point{2,VT}}[]
for face in mesh.faces
v1,v2,v3 = mesh.vertices[face]
zmax = max(v1[3], v2[3], v3[3])
zmin = min(v1[3], v2[3], v3[3])
if height > zmax
continue
elseif zmin <= height
if v1[3] < height && v2[3] >= height && v3[3] >= height
p1 = v1
p2 = v3
p3 = v2
elseif v1[3] > height && v2[3] < height && v3[3] < height
p1 = v1
p2 = v2
p3 = v3
elseif v2[3] < height && v1[3] >= height && v3[3] >= height
p1 = v2
p2 = v1
p3 = v3
elseif v2[3] > height && v1[3] < height && v3[3] < height
p1 = v2
p2 = v3
p3 = v1
elseif v3[3] < height && v2[3] >= height && v1[3] >= height
p1 = v3
p2 = v2
p3 = v1
elseif v3[3] > height && v2[3] < height && v1[3] < height
p1 = v3
p2 = v1
p3 = v2
else
continue
end
start = Point{2,VT}(p1[1] + (p2[1] - p1[1]) * (height - p1[3]) / (p2[3] - p1[3]),
p1[2] + (p2[2] - p1[2]) * (height - p1[3]) / (p2[3] - p1[3]))
finish = Point{2,VT}(p1[1] + (p3[1] - p1[1]) * (height - p1[3]) / (p3[3] - p1[3]),
p1[2] + (p3[2] - p1[2]) * (height - p1[3]) / (p3[3] - p1[3]))
push!(slice, Simplex{2,Point{2, VT}}(start, finish))
end
end
return slice
end
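# Sketch: slicing a mesh at z = 0.5 returns the 2-D segments of that cross-section:
#   segments = slice(mesh, 0.5)    # Vector of Simplex{2,Point{2,VT}} line segments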
# TODO this should be checkbounds(Bool, ...)
"""
```
checkbounds(m::AbstractMesh{VT, Face{FD, FT}}) where {VT, FD, FT}
```
Check the `Face` indices to ensure they are in the bounds of the vertex
array of the `AbstractMesh`.
"""
function Base.checkbounds(m::AbstractMesh{VT, Face{FD, FT}}) where {VT, FD, FT}
isempty(faces(m)) && return true # nothing to worry about I guess
flat_inds = reinterpret(FT, faces(m))
checkbounds(Bool, vertices(m), flat_inds)
end
|
proofpile-julia0005-42665 | {
"provenance": "014.jsonl.gz:242666"
} | #=
Code for solving Laplacian and Diagnally Dominant Systems
This is not all of it, just the short and sweet routines.
Started by Dan Spielman
Contributors: ???
lapWrapSolver: takes a solver for DD systems, and uses it to solve a lap system in la
lapWrapSolver(solver, la::AbstractArray)
lapWrapSolver(solver)
lapWrapSolver(solver, la::AbstractArray, b) = lapWrapSolver(solver,la)(b)
For example, to make a Cholesky-based solver for Laplacians, we created
lapChol = lapWrapSolver(cholfact)
augmentTree : takes a spanning tree, a graph, and adds in 2k edges of high stretch
takes both as sparse matrices
augTreeSolver : a solver that shouldn't suck
=#
include("sampler.jl")
include("randTrees.jl")
#==================================
Wrapping solver for Laplacians
==================================#
#=
function lapWrapSolver(solver, la::AbstractMatrix)
N = size(la)[1]
lasub = la[1:(N-1),1:(N-1)]
subSolver = solver(lasub);
f = function(b)
b = b - mean(b)
bs = b[1:(N-1)]
if isa(subSolver,Factorization)
xs = subSolver \ bs
else
xs = subSolver(bs)
end
x = [xs;0]
x = x - mean(x)
return x
end
return f
end
=#
"""Apply the ith solver on the ith component"""
function blockSolver(comps, solvers)
f = function(b)
x = zeros(size(b))
for i in 1:length(comps)
ind = comps[i]
bi = b[ind]
if isa(solvers[i],Factorization)
x[ind] = solvers[i] \ bi
else
x[ind] = (solvers[i])(bi)
end
end
return x
end
end
"""Takes a solver for solving nonsingular sdd systems,
and returns a solver for solving Laplacian systems.
The optional args tol and maxits are not necessarily taken by
all solvers. But, if they are, one can pass them here"""
function lapWrapSolver(solver, la::AbstractArray; tol::Real=0.0, maxits::Integer=0, maxtime=Inf)
N = size(la)[1]
lasub = la[1:(N-1),1:(N-1)]
# need to be sure that removing does not disconnect.
# or, if it does, solve on the components!
co = components(lasub)
if maximum(co) == 1
if tol > 0
# this section has been updated with maxtime; the others may not be
if maxits > 0
if maxtime > 0
subSolver = solver(lasub, tol=tol, maxits=maxits, maxtime=maxtime);
else
subSolver = solver(lasub, tol=tol, maxits=maxits);
end
else
if maxtime > 0
subSolver = solver(lasub, tol=tol, maxtime=maxtime);
else
subSolver = solver(lasub, tol=tol);
end
end
else
if maxits > 0
subSolver = solver(lasub, maxits=maxits);
else
subSolver = solver(lasub);
end
end
else
comps = vecToComps(co)
solvers = []
for i in 1:length(comps)
ind = comps[i]
lasubsub = lasub[ind,ind]
if (length(ind) == 1)
ssubSolver = x->(x/lasubsub[1,1])
elseif (length(ind) < 50)
ssubSolver = cholfact(lasubsub)
else
if tol > 0
if maxits > 0
ssubSolver = solver(lasubsub, tol=tol, maxits=maxits);
else
ssubSolver = solver(lasubsub, tol=tol);
end
else
if maxits > 0
ssubSolver = solver(lasubsub, maxits=maxits);
else
ssubSolver = solver(lasubsub);
end
end
end
push!(solvers, ssubSolver)
end
subSolver = blockSolver(comps,solvers)
end
f = function(b)
b = b - mean(b)
bs = b[1:(N-1)]
if isa(subSolver,Factorization)
xs = subSolver \ bs
else
xs = subSolver(bs)
end
x = [xs;0]
x = x - mean(x)
return x
end
return f
end
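# Usage sketch (assuming `lap` and a graph generator such as `grid2`, as provided
# elsewhere in this package; `grid2` is an assumption, not defined in this file):
#
#   la = lap(grid2(10))               # Laplacian of a 10x10 grid graph
#   f  = lapWrapSolver(cholfact, la)  # factor once, then solve many right-hand sides
#   x  = f(b)                         # solves la*x = b - mean(b), with mean(x) == 0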
#=
function lapWrapSolver(solver)
f = function(la::AbstractMatrix)
return lapWrapSolver(solver, la)
end
return f
end
=#
function lapWrapSolver(solver; tol::Real=0.0, maxits::Integer=0)
f = function(la::AbstractArray)
return lapWrapSolver(solver, la, tol=tol, maxits=maxits)
end
return f
end
"""lapChol uses a Cholesky Factorization to solver systems in Laplacians"""
function lapChol()
return lapWrapSolver(cholfact)
end
#lapWrapSolver(solver, la::AbstractMatrix, b) = lapWrapSolver(solver,la)(b)
lapWrapSolver(solver, la::AbstractArray, b; tol::Real=0.0, maxits::Integer=0) = lapWrapSolver(solver,la, tol=tol, maxits=maxits)(b)
#=========================================
Augmented Spanning Tree Preconditioner
=========================================#
"""Takes as input a tree and an adjacency matrix of a graph.
It then computes the stretch of every edge of the graph wrt
the tree. It then adds back the k edges of highest stretch,
and k edges sampled according to stretch"""
function augmentTree{Tv,Ti}(tree::SparseMatrixCSC{Tv,Ti}, mat::SparseMatrixCSC{Tv,Ti}, k::Ti)
st = compStretches(tree, mat)
# just to be safe, remove the tree from this
#=
(ti,tj) = findnz(tree)
for i in 1:length(ti) # most of the time
st[ti[i],tj[i]] = 0
st[tj[i],ti[i]] = 0
end
=#
(ai,aj,av) = findnz(triu(st))
ord = sortperm(av, rev=true)
edgeinds = zeros(Bool,length(av))
for i in 1:k
edgeinds[ord[i]] = true
end
s = sum(av[ord[(k+1):end]])
probs = av * k / s
probs[ord[1:k]] = 0
edgeinds[rand(length(av)) .< probs] = true
augi = ai[edgeinds]
augj = aj[edgeinds]
augm = length(augi)
augv = zeros(Tv, augm)
for i in 1:augm
augv[i] = mat[augi[i],augj[i]]
end
n = size(mat)[1]
aug = sparse(augi, augj, augv, n, n)
aug = aug + aug'
return tree + aug
end
"""This is an augmented spanning tree preconditioner for diagonally dominant
linear systems. It takes as optional input a tree growing algorithm.
It adds back 2sqrt(n) edges via augmentTree: the sqrt(n) of highest stretch
and another sqrt(n) sampled according to stretch.
For most purposes, one should directly call `augTreeSolver`."""
function augTreePrecon{Tv,Ti}(ddmat::SparseMatrixCSC{Tv,Ti}; treeAlg=akpw)
adjmat = -triu(ddmat,1)
adjmat = adjmat + adjmat'
tree = treeAlg(adjmat)
n = size(ddmat)[1]
augtree = augmentTree(tree,adjmat,convert(Int,round(sqrt(n))))
Dx = spdiagm(ddmat*ones(n))
augDD = Dx + spdiagm(augtree*ones(n)) - augtree
F = cholfact(augDD)
return x -> (F\x)
end
"""
An "augmented spanning tree" solver for positive definite diagonally dominant matrices.
It works by adding edges to a low stretch spanning tree. It calls `augTreePrecon` to form
the preconditioner.
~~~julia
augTreeSolver{Tv,Ti}(ddmat::SparseMatrixCSC{Tv,Ti}; tol::Real=1e-6, maxits=Inf, maxtime=Inf, verbose=false, treeAlg=akpw)
~~~
"""
function augTreeSolver{Tv,Ti}(ddmat::SparseMatrixCSC{Tv,Ti}; tol::Real=1e-6, maxits=Inf, maxtime=Inf, verbose=false, treeAlg=akpw)
F = augTreePrecon(ddmat, treeAlg=treeAlg)
f(b;maxits=maxits, maxtime=maxtime, verbose=verbose) = pcg(ddmat, b, F, tol=tol, maxits=maxits, maxtime=maxtime, verbose=verbose)
return f
end
"""This is an augmented spanning tree preconditioner for Laplacians.
It takes as optional input a tree growing algorithm.
It adds back 2sqrt(n) edges via `augmentTree`: the sqrt(n) of highest stretch
and another sqrt(n) sampled according to stretch.
For most purposes, one should directly call `augTreeLapSolver`."""
function augTreeLapPrecon{Tv,Ti}(ddmat::SparseMatrixCSC{Tv,Ti}; treeAlg=akpw)
adjmat = -triu(ddmat,1)
adjmat = adjmat + adjmat'
tree = treeAlg(adjmat)
n = size(ddmat)[1]
augtree = augmentTree(tree,adjmat,convert(Int,round(sqrt(n))))
Dx = spdiagm(ddmat*ones(n))
augDD = Dx + spdiagm(augtree*ones(n)) - augtree
F = lapWrapSolver(cholfact,augDD)
return F
end
"""
An "augmented spanning tree" solver for Laplacian matrices.
It works by adding edges to a low stretch spanning tree. It calls `augTreeLapPrecon` to form
the preconditioner. In line with the other solvers, it takes as input the adjacency matrix of the system.
~~~julia
augTreeLapSolver{Tv,Ti}(a::SparseMatrixCSC{Tv,Ti}; tol::Real=1e-6, maxits=Inf, maxtime=Inf, verbose=false, treeAlg=akpw)
~~~
"""
function augTreeLapSolver{Tv,Ti}(a::SparseMatrixCSC{Tv,Ti}; tol::Real=1e-6, maxits=Inf, maxtime=Inf, verbose=false, treeAlg=akpw)
la = lap(a)
F = augTreeLapPrecon(la, treeAlg=treeAlg)
f(b;maxits=maxits, maxtime=maxtime, verbose=verbose) = pcg(la, b, F, tol=tol, maxits=maxits, maxtime=maxtime, verbose=verbose)
return f
end
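# Usage sketch (again assuming a generator such as `grid2` for the adjacency matrix;
# the tolerance below is just an example value):
#
#   a = grid2(100)
#   f = augTreeLapSolver(a, tol=1e-8)
#   x = f(b)   # PCG solve of lap(a)*x = b with the augmented-tree preconditioner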
"""
A wrapper for the PyAMG solver.
~~~julia
AMGSolver{Tv,Ti}(ddmat::SparseMatrixCSC{Tv,Ti}; tol::Float64=1e-6, maxits=Inf, maxtime=Inf, verbose=false)
~~~
"""
function AMGSolver{Tv,Ti}(ddmat::SparseMatrixCSC{Tv,Ti}; tol::Float64=1e-6, maxits=Inf, maxtime=Inf, verbose=false)
amg = PyAMG.RugeStubenSolver(ddmat);
M = PyAMG.aspreconditioner(amg);
function F(b)
return M \ b;
end
f(b;maxits=maxits, maxtime=maxtime, verbose=verbose) = pcg(ddmat, b, F, tol=tol, maxits=maxits, maxtime=maxtime, verbose=verbose)
return f
end
"""
A wrapper for the PyAMG solver. In line with our other solvers, takes in an adjacency matrix.
~~~julia
AMGLapSolver{Tv,Ti}(a::SparseMatrixCSC{Tv,Ti}; tol::Float64=1e-6, maxits=Inf, maxtime=Inf, verbose=false)
~~~
"""
function AMGLapSolver{Tv,Ti}(a::SparseMatrixCSC{Tv,Ti}; tol::Float64=1e-6, maxits=Inf, maxtime=Inf, verbose=false)
la = lap(a)
amg = PyAMG.RugeStubenSolver(la);
M = PyAMG.aspreconditioner(amg);
function F(b)
return M \ b;
end
f(b;maxits=maxits, maxtime=maxtime, verbose=verbose) = pcg(la, b, F, tol=tol, maxits=maxits, maxtime=maxtime, verbose=verbose)
return f
end
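# Usage sketch (requires the PyAMG Julia package with a working pyamg installation;
# the tolerance is illustrative):
#
#   f = AMGLapSolver(a, tol=1e-8)
#   x = f(b)   # PCG solve of lap(a)*x = b with a Ruge-Stuben AMG preconditioner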
|
proofpile-julia0005-42666 | {
"provenance": "014.jsonl.gz:242667"
} | using Documenter
using Optinum
makedocs(
modules = [Optinum],
sitename = "Optinum.jl",
authors = "Saloua Naama, Mohamed El Waghf et Rachid ELMontassir",
format = Documenter.HTML(prettyurls = get(ENV, "CI", nothing) == "true"),
pages = [
"Accueil" => "index.md",
"Sujet" => "Sujet.md",
"Algorithmes" => [
"L'algorithme de Newton local" => "Algorithme_de_newton.md",
"La méthode des régions de confiance" => "Regions_de_confiance.md",
"La méthode du Lagrangien augmenté" => "Lagrangien_augmente.md"
],
"Index des fonctions" =>"fct_index.md",
"Annexes" => "Annexes.md",
#"Exemples d'appels" =>"Exemples.md",
"Installation de Julia et tests unitaires" => "mise_en_place.md",
#"Julia vs MatLab"=> "julia_vs_matlab.md",
#"Création de Modules en Julia" => "create_package.md",
#"Création d'un dépôt Git pour un module Julia" => "git_doc.md",
#"Géneration de la doc" => "generation_doc.md",
#"Intégration continue et déploiement de la Doc avec Travis" =>"Integration_continue.md",
#"Précompilation des modules"=>"Precompilation.md",
#"Génération du rapport" => "generate_pdf.md",
"Foire aux Questions" =>"FAQ.md"
]
)
deploydocs(
repo = "github.com/mathn7/Optinum.git",
branch = "gh-pages",
devbranch = "master",
)
|
proofpile-julia0005-42667 | {
"provenance": "014.jsonl.gz:242668"
} | abstract type DOFNumbering end
"""
Creates and stores the DOF numbering associated to the approximation space
constructed using the basis `b` in the `Domain` `Ω`.
In practice, the vector `dofs` contains all unique *global* identifiers (type
`T`) of degrees of freedom present in the domain `Ω` in reference to the
global information contained in `mesh`. The length of this vector depends on
the domain `Ω` but not the values of its elements. The *local* numbering of
the DOFs are given by the indices in this vector `dofs`, so in a sense this
gives a local to global numbering mapping.
The vector `elt2dof` contains, for each element (on which the basis `b` is
defined) in the domain `Ω`, the indices in the vector `dofs` of all degrees of
freedom that are (partly) supported on the said element.
Some optimization is possible in some cases where we could avoid the
allocation of one or both attributes `dofs` and `elt2dof` altogether.
Warning: this is not robust if the underlying `mesh` is modified.
"""
struct LocalDOFNumbering{S,T} <: DOFNumbering
mesh::AbstractMesh
Ω::Domain
b::PolynomialBasis
dofs::Vector{S}
elt2dof::Vector{T}
end
dofs(dofnb::LocalDOFNumbering) = dofnb.dofs
elt2dof(dofnb::LocalDOFNumbering) = dofnb.elt2dof
function _element_support_basis(mesh, b::LagrangeBasis{D,p}) where {D,p}
elts = DataType[]
for E in etypes(mesh)
if E <: AbstractElement
if typeof(domain(E)) == D
push!(elts, E)
end
end
end
@assert length(elts) == 1
return elts[1]
end
"""
One degree of freedom per mesh element in the domain `Ω`.
"""
function local_dof_numbering(mesh::GenericMesh{d,T}, Ω::Domain,
b::LagrangeBasis{D,0}) where {d,D,T}
E = _element_support_basis(mesh, b)
dofs = Int64[]
for ent in entities(Ω)
append!(dofs, ent2tags(mesh)[ent][E])
end
elt2dof = collect(1:length(dofs))
return LocalDOFNumbering(mesh, Ω, b, dofs, elt2dof)
end
"""
One degree of freedom per mesh node in the domain `Ω`.
"""
function local_dof_numbering(mesh::GenericMesh, Ω::Domain,
b::LagrangeBasis{D,1}) where {D}
E = _element_support_basis(mesh, b)
N = number_of_nodes(D)
node_tags = Int64[]
for ent in entities(Ω)
ent_tags = ent2tags(mesh)[ent][E]
node_tags_ent = unique!(sort!(elements(mesh)[E][:,ent_tags][:]))
append!(node_tags, node_tags_ent)
end
dofs = unique!(sort!(node_tags))
glob2loc = SparseVector(length(nodes(mesh)),dofs,collect(1:length(dofs)))
elt2dof = SVector{N,Int64}[]
for ent in entities(Ω)
ent_tags = ent2tags(mesh)[ent][E]
node_tags_ent = elements(mesh)[E][:,ent_tags]
for ie in 1:size(node_tags_ent,2)
push!(elt2dof, glob2loc[node_tags_ent[:,ie]])
end
end
return LocalDOFNumbering(mesh, Ω, b, dofs, elt2dof)
end
function assembly(mesh, Ω, u, v; order=1,
f=(u,v)->(i,j,x̂,_,_)->u(x̂)[j]*v(x̂)[i])
submesh = view(mesh, Ω)
etype2qrule = Mesh._qrule_for_mesh(submesh, order)
E = _element_support_basis(submesh, u)
@assert E == _element_support_basis(submesh, v)
qrule = etype2qrule[E]
udofnb = local_dof_numbering(mesh, Ω, u)
vdofnb = local_dof_numbering(mesh, Ω, v)
n = length(dofs(udofnb))
m = length(dofs(vdofnb))
Is = Int64[]
Js = Int64[]
Vs = Float64[]
M = zeros(Float64,length(v),length(u))
for (iel, el) in enumerate(elements(submesh, E))
utags = elt2dof(udofnb)[iel]
vtags = elt2dof(vdofnb)[iel]
elementary_matrix!(M,el, u, v, qrule; f=f)
for (iloc, iglob) in enumerate(vtags)
for (jloc, jglob) in enumerate(utags)
push!(Is, iglob)
push!(Js, jglob)
push!(Vs, M[iloc,jloc])
end
end
fill!(M,0)
end
A = sparse(Is,Js,Vs,m,n)
return A
end
function assembly(mesh, Ω, v; order=1,
f=(v)->(i,x̂,_,_)->v(x̂)[i])
submesh = view(mesh, Ω)
etype2qrule = Mesh._qrule_for_mesh(submesh, order)
E = _element_support_basis(submesh, v)
qrule = etype2qrule[E]
vdofnb = local_dof_numbering(mesh, Ω, v)
m = length(dofs(vdofnb))
b = zeros(Float64, m)
for (iel, el) in enumerate(elements(submesh, E))
vtags = elt2dof(vdofnb)[iel]
M = elementary_matrix(el, v, qrule; f=f)
for (iloc, iglob) in enumerate(vtags)
b[iglob] += M[iloc]
end
end
return b
end |
proofpile-julia0005-42668 | {
"provenance": "014.jsonl.gz:242669"
} | using DiffEqPhysics, ForwardDiff, OrdinaryDiffEq
using StaticArrays, LinearAlgebra
using SafeTestsets, Test
@safetestset "Hamiltonian Test" begin include("hamiltonian_test.jl") end
#@safetestset "N-Body Test" begin include("nbody_test.jl") end
|
proofpile-julia0005-42669 | {
"provenance": "014.jsonl.gz:242670"
} | """
"""
module GPU # begin submodule PredictMD.GPU
############################################################################
# PredictMD.GPU source files ###############################################
############################################################################
end # end submodule PredictMD.GPU
|
proofpile-julia0005-42670 | {
"provenance": "014.jsonl.gz:242671"
} | import Zygote
using Test: @test, @testset, @inferred
using Random: bitrand
using Statistics: mean
using LinearAlgebra: I
using RestrictedBoltzmannMachines: RBM, BinaryRBM
using RestrictedBoltzmannMachines: visible, hidden, weights
using RestrictedBoltzmannMachines: energy, interaction_energy, free_energy, ∂free_energy
using RestrictedBoltzmannMachines: inputs_v_to_h, inputs_h_to_v
using WhitenedRBMs: BinaryWhiteRBM, WhiteRBM, Affine
using WhitenedRBMs: whiten, blacken, whiten_visible, whiten_hidden
using WhitenedRBMs: whiten!, whiten_visible!, whiten_hidden!
using WhitenedRBMs: energy_shift, energy_shift_visible, energy_shift_hidden
@testset "whiten / blacken" begin
rbm = @inferred BinaryRBM(randn(3), randn(2), randn(3,2))
affine_v = @inferred Affine(randn(3,3), randn(3))
affine_h = @inferred Affine(randn(2,2), randn(2))
white_rbm = @inferred whiten(rbm, affine_v, affine_h)
@test white_rbm.affine_v.u ≈ affine_v.u
@test white_rbm.affine_h.u ≈ affine_h.u
@test white_rbm.affine_v.A ≈ affine_v.A
@test white_rbm.affine_h.A ≈ affine_h.A
@test affine_v.A' * weights(white_rbm) * affine_h.A ≈ weights(rbm)
@test visible(rbm).θ ≈ visible(white_rbm).θ - weights(rbm) * affine_h.u
@test hidden(rbm).θ ≈ hidden(white_rbm).θ - weights(rbm)' * affine_v.u
@test blacken(rbm) == rbm
brbm = @inferred blacken(white_rbm)
wrbm = @inferred whiten(white_rbm)
@test visible(brbm).θ ≈ visible(wrbm).θ ≈ visible(rbm).θ
@test hidden(brbm).θ ≈ hidden(wrbm).θ ≈ hidden(rbm).θ
@test weights(brbm) ≈ weights(wrbm) ≈ weights(rbm)
@test iszero(wrbm.affine_v.u)
@test iszero(wrbm.affine_h.u)
@test wrbm.affine_v.A ≈ I
@test wrbm.affine_h.A ≈ I
# whiten_visible
affine_v_new = @inferred Affine(randn(3,3), randn(3))
white_rbm_new = @inferred whiten_visible(white_rbm, affine_v_new)
@test white_rbm_new.affine_v.u == affine_v_new.u
@test white_rbm_new.affine_v.A == affine_v_new.A
@test white_rbm_new.affine_h.u == affine_h.u
@test white_rbm_new.affine_h.A == affine_h.A
white_rbm_expected = whiten(white_rbm, affine_v_new, affine_h)
@test visible(white_rbm_new).θ ≈ visible(white_rbm_expected).θ
@test hidden(white_rbm_new).θ ≈ hidden(white_rbm_expected).θ
@test weights(white_rbm_new) ≈ weights(white_rbm_expected)
# whiten_hidden
affine_h_new = @inferred Affine(randn(2,2), randn(2))
white_rbm_new = @inferred whiten_hidden(white_rbm, affine_h_new)
@test white_rbm_new.affine_v.u == affine_v.u
@test white_rbm_new.affine_v.A == affine_v.A
@test white_rbm_new.affine_h.u == affine_h_new.u
@test white_rbm_new.affine_h.A == affine_h_new.A
white_rbm_expected = whiten(white_rbm, affine_v, affine_h_new)
@test visible(white_rbm_new).θ ≈ visible(white_rbm_expected).θ
@test hidden(white_rbm_new).θ ≈ hidden(white_rbm_expected).θ
@test weights(white_rbm_new) ≈ weights(white_rbm_expected)
wrbm1 = whiten_visible(whiten_hidden(rbm, affine_h), affine_v)
wrbm2 = whiten_hidden(whiten_visible(rbm, affine_v), affine_h)
for wrbm in (wrbm1, wrbm2)
@test wrbm.affine_v.u == affine_v.u
@test wrbm.affine_v.A == affine_v.A
@test wrbm.affine_h.u == affine_h.u
@test wrbm.affine_h.A == affine_h.A
@test visible(wrbm).θ ≈ visible(white_rbm).θ
@test hidden(wrbm).θ ≈ hidden(white_rbm).θ
@test weights(wrbm) ≈ weights(white_rbm)
end
end
@testset "energy invariance" begin
affine_v = @inferred Affine(randn(3,3), randn(3))
affine_h = @inferred Affine(randn(2,2), randn(2))
rbm = BinaryRBM(randn(3), randn(2), randn(3,2))
white_rbm = @inferred whiten(rbm, affine_v, affine_h)
v = bitrand(size(visible(rbm))..., 100)
h = bitrand(size(hidden(rbm))..., 100)
@test energy(visible(rbm), v) ≈ energy(visible(white_rbm), v) + v' * weights(rbm) * affine_h.u
@test energy(hidden(rbm), h) ≈ energy(hidden(white_rbm), h) + h' * weights(rbm)' * affine_v.u
@test inputs_v_to_h(rbm, v .- affine_v.u) ≈ @inferred inputs_v_to_h(white_rbm, v)
@test inputs_h_to_v(rbm, h .- affine_h.u) ≈ @inferred inputs_h_to_v(white_rbm, h)
ΔE = interaction_energy(rbm, affine_v.u, affine_h.u)::Real
@test @inferred(energy_shift(rbm, affine_v, affine_h)) ≈ ΔE
@test energy(rbm, v, h) ≈ energy(white_rbm, v, h) .- ΔE
@test free_energy(rbm, v) ≈ free_energy(white_rbm, v) .- ΔE
@test energy_shift_visible(rbm, affine_v) ≈ energy_shift(rbm, affine_v, one(affine_h))
@test energy_shift_hidden(rbm, affine_h) ≈ energy_shift(rbm, one(affine_v), affine_h)
end
@testset "whiten!" begin
rbm = @inferred BinaryWhiteRBM(
randn(3), randn(2), randn(3,2),
Affine(randn(3,3), randn(3)),
Affine(randn(2,2), randn(2))
)
@test iszero(energy_shift(rbm, rbm.affine_v, rbm.affine_h))
v = bitrand(size(visible(rbm))..., 100)
h = bitrand(size(hidden(rbm))..., 100)
E = energy(rbm, v, h)
F = free_energy(rbm, v)
affine_v = @inferred Affine(randn(3,3), randn(3))
affine_h = @inferred Affine(randn(2,2), randn(2))
@test energy_shift_visible(rbm, affine_v) ≈ energy_shift(rbm, affine_v, rbm.affine_h)
@test energy_shift_hidden(rbm, affine_h) ≈ energy_shift(rbm, rbm.affine_v, affine_h)
ΔE = energy_shift(rbm, affine_v, affine_h)
@test energy(whiten(rbm, affine_v, affine_h), v, h) ≈ E .+ ΔE
whiten!(rbm, affine_v, affine_h)
@test rbm.affine_v.u ≈ affine_v.u
@test rbm.affine_h.u ≈ affine_h.u
@test rbm.affine_v.A ≈ affine_v.A
@test rbm.affine_h.A ≈ affine_h.A
@test energy(rbm, v, h) ≈ E .+ ΔE
@test free_energy(rbm, v) ≈ F .+ ΔE
end
@testset "free energy" begin
affine_v = Affine(randn(3,3), randn(3))
affine_h = Affine(randn(2,2), randn(2))
white_rbm = BinaryWhiteRBM(randn(3), randn(2), randn(3,2), affine_v, affine_h)
v = bitrand(size(visible(white_rbm))...)
F = -log(sum(exp(-energy(white_rbm, v, h)) for h in [[0,0], [0,1], [1,0], [1,1]]))
@test free_energy(white_rbm, v) ≈ F
end
@testset "∂free energy" begin
affine_v = Affine(randn(3,3), randn(3))
affine_h = Affine(randn(2,2), randn(2))
white_rbm = BinaryWhiteRBM(randn(3), randn(2), randn(3,2), affine_v, affine_h)
v = bitrand(size(visible(white_rbm))...)
gs = Zygote.gradient(white_rbm) do white_rbm
mean(free_energy(white_rbm, v))
end
∂ = ∂free_energy(white_rbm, v)
@test ∂.visible.θ ≈ only(gs).visible.θ
@test ∂.hidden.θ ≈ only(gs).hidden.θ
@test ∂.w ≈ only(gs).w
end
|
proofpile-julia0005-42671 | {
"provenance": "014.jsonl.gz:242672"
} | using DataStructures: CircularBuffer
include("AlgorithmicEnv.jl")
"""
Task is to reverse content over the input tape.
http://arxiv.org/abs/1511.07275
"""
mutable struct ReverseEnv <: TapeAlgorithmicEnv
MIN_REWARD_SHORTFALL_FOR_PROMOTION::Float32
base::Int
episode_total_reward::Float32
reward_shortfalls::CircularBuffer{Float32}
charmap::Array{Char, 1}
min_length::Int
action_space::TupleSpace
observation_space::Discrete
MOVEMENTS::NTuple
READ_HEAD_START::Int
target::Array{Int8, 1} # target tape
input_data::Array{Int8, 1}
time::Int8
read_head_position::Int8 # current position of the read head.
write_head_position::Int8
last_action
last_reward::Float32
end
ReverseEnv(base::Int=2) =
ReverseEnv(-1f-1, # MIN_REWARD_SHORTFALL_FOR_PROMOTION
base, # base (size of the tape alphabet)
0f0, # episode_total_reward
CircularBuffer{Float32}(50), # reward_shortfalls
push!(['A'+i for i=0:base-1], ' '), # charmap
1, # starting min_length
TupleSpace([Discrete(2), Discrete(2), Discrete(base)]), # action_space
Discrete(base+1), # observation_space
(:left, :right), 1, # MOVEMENTS and READ_HEAD_START
Int8[], Int8[], 0, 1, 1, nothing, 0.0)
target_from_input_data!(env::ReverseEnv) =
env.target = [char for char in reverse(env.input_data)]
|
proofpile-julia0005-42672 | {
"provenance": "014.jsonl.gz:242673"
} | using RegularizedRegression
using BaseTestNext
using Compat
const NumTestRepetitions = 30
using RegularizedRegression: learn, coefficients, RegressionResult
function make_dataset(nRange, pRange, coefRange)
N = rand(nRange)
P = rand(pRange)
X = randn(P, N)
sel = rand(1:P)
coef = rand(coefRange)
y = X[sel, :] * coef
ds = MatrixDataSet(X, y)
return N, P, sel, X, y, ds
end
function test_learner_with_includeIntercept(learnertype, maxMape = 10.0)
N, P, sel, X, y, ds = make_dataset(10:50, 2:8, 1.0:0.01:10.0)
l = learnertype()
res = learn(l, ds)
@test (typeof(res) <: Dict || typeof(res) <: RegressionResult)
beta = coefficients(l, ds)
@test typeof(beta) <: Array
@test length(beta) == P+1
beta2 = coefficients(l, ds; includeIntercept = false)
@test typeof(beta2) <: Array
@test length(beta2) == P
yhat = X' * beta2
@test RegularizedRegression.mape(yhat, y) < maxMape # Accuracy should be very high since no noise
end
function test_learner(learnertype, maxMape = 10.0)
N, P, sel, X, y, ds = make_dataset(10:50, 2:8, 1.0:0.01:10.0)
l = learnertype()
res = learn(l, ds)
@test (typeof(res) <: Dict || typeof(res) <: RegressionResult)
beta = coefficients(l, ds)
@test typeof(beta) <: Array
@test length(beta) == P
yhat = X' * beta
@test RegularizedRegression.mape(yhat, y) < maxMape # Accuracy should be very high since no noise
end
|
proofpile-julia0005-42673 | {
"provenance": "014.jsonl.gz:242674"
} | function runfig7()
p = Params(Ln = 195, W = 2500, Ls = 20, Ws = 20, scale = 40, λ = 5,
Δ = 1, d = 0, τ = 1);
ϕlist = -2π:π/10:2π
println("1")
_, esa = spectrumvsphase(ϕlist,
reconstruct(p, EZ = SA[0, 2, 0], μN = 0.6, α = 0), nev = 16)
println("2")
_, esb = spectrumvsphase(ϕlist,
reconstruct(p, EZ = SA[0, 4, 0], μN = 1.192, α = 0), nev = 16)
println("3")
_, esc = spectrumvsphase(ϕlist,
reconstruct(p, EZ = SA[0, 2, 0], μN = 0.6, α = 2), nev = 16)
println("4")
ϕs, esd = spectrumvsphase(ϕlist,
reconstruct(p, EZ = SA[0, 4, 0], μN = 1.192, α = 2), nev = 16)
savespectrumvsphase("fig7", p, ϕs, esa, esb, esc, esd)
end
# Extra: checking W and Ln dependence
# function Wtest(Wlist, p, ϕ0, panel; nev = 4, kw...)
# elist = zeros(Float64, nev, length(Wlist))
# for i in 1:length(Wlist)
# p = reconstruct(p, W = Wlist[i])
# ph = rectangle_weaklink(p; kw...)
# s = spectrum(ph(phi = ϕ0), method = ArpackPackage(sigma = 1e-7, nev = nev))
# elist[:,i] = s.energies
# println(s.energies, " W: ",Wlist[i])
# end
# return Wlist, elist
# end
# function testW(Ln0, ϕ0, panel)
# Wlist = 2000:200:2500
# p = Params(Ln = Ln0, Ls = 20, Ws = 20, scale = 40, λ = 5, Δ = 1, d = 0, τ = 1);
# if panel == "a"
# p = reconstruct(p, EZ = SA[0, 2, 0], μN = 0.6, α = 0)
# elseif panel == "b"
# p = reconstruct(p, EZ = SA[0, 4, 0], μN = 1.192, α = 0)
# elseif panel == "c"
# p = reconstruct(p, EZ = SA[0, 2, 0], μN = 0.6, α = 2)
# else
# p = reconstruct(p, EZ = SA[0, 4, 0], μN = 1.192, α = 2)
# end
# Wtest(Wlist, p, ϕ0, panel)
# end
# function Lntest(Lnlist, p, ϕ0, panel; nev = 4, kw...)
# elist = zeros(Float64, nev, length(Lnlist))
# for i in 1:length(Lnlist)
# p = reconstruct(p, Ln = Lnlist[i])
# ph = rectangle_weaklink(p; kw...)
# s = spectrum(ph(phi = ϕ0), method = ArpackPackage(sigma = 1e-7, nev = nev))
# elist[:,i] = s.energies
# println(s.energies, " Ln: ",Lnlist[i])
# end
# return Lnlist, elist
# end
# function testLn(W0, ϕ0, panel, Lnlist )
# p = Params(W = W0, Ls = 20, Ws = 20, scale = 40, λ = 5, Δ = 1, d = 0, τ = 1);
# if panel == "a"
# p = reconstruct(p, EZ = SA[0, 2, 0], μN = 0.6, α = 0)
# elseif panel == "b"
# p = reconstruct(p, EZ = SA[0, 4, 0], μN = 1.192, α = 0)
# elseif panel == "c"
# p = reconstruct(p, EZ = SA[0, 2, 0], μN = 0.6, α = 2)
# else
# p = reconstruct(p, EZ = SA[0, 4, 0], μN = 1.192, α = 2)
# end
# Lntest(Lnlist, p, ϕ0, panel)
# end
|
proofpile-julia0005-42674 | {
"provenance": "014.jsonl.gz:242675"
} | function compute_thrust_direction(obj::ThrustDirectionProvider, arg0::PVCoordinatesProvider, arg1::AbsoluteDate, arg2::Frame)
return jcall(obj, "computeThrustDirection", Vector3D, (PVCoordinatesProvider, AbsoluteDate, Frame), arg0, arg1, arg2)
end
|
proofpile-julia0005-42675 | {
"provenance": "014.jsonl.gz:242676"
} | module MsgPackRpcClient
include("const.jl")
include("base.jl")
include("socks.jl")
include("session.jl")
include("future.jl")
using MsgPack
export MsgPackRpcClientSession, call, get
function call(s::MsgPackRpcClientSession.Session, method::String, params...; sync::Bool = DEFAULT_SYNC_POLICY, sock = nothing)
if sock == nothing
try
sock = s.get_sock()
catch
MsgPackRpcClientSocks.connect_and_push!(s.socks)
s.ptr = 1
sock = s.get_sock()
end
end
# NOTE: Get msg_id and calculate next_id never to use the same msg_id even if a request fails.
msg_id = s.next_id
# NOTE: For compatibility, msgid must be Int32
s.next_id = s.next_id >= 1<<(BIT_OF_MSGID - 1) ? 0 : s.next_id + 1
future = send_request(sock, msg_id, method, params)
if sync == true
receive_response(sock, future)
return get(future)
else
future.task = @async receive_response(sock, future)
return future
end
end
function send_request(sock, msg_id::Int, method::String, args)
params = MsgPackRpcClientBase.to_a(args)
packed_data = MsgPack.pack({REQUEST, msg_id, method, params})
if typeof(sock) == Base.TcpSocket
send_data_in_tcp(sock, packed_data)
#else if typeof(sock) == Base.UdpSocket
# send_data_in_udp(sock, packed_data, host, port)
#else
# # error
end
MsgPackRpcClientFuture.Future(TIMEOUT_IN_SEC, nothing, nothing, nothing, false, nothing, nothing, nothing, msg_id, nothing)
end
function send_data_in_tcp(sock::Base.TcpSocket, data)
write(sock, data)
nothing
end
function send_data_in_udp(sock::Base.UdpSocket, data, host::String, port::Int)
send(sock, host, port, data)
nothing
end
function get(future::MsgPackRpcClientFuture.Future)
if future.task != nothing
wait(future.task)
end
if future.error == nothing
# if future.result_handler != nothing
# return result_handler.call()
# end
return future.result
end
if future.result == nothing
# TODO: raise instead of return
return nothing
end
# if future.error_handler != nothing
# return error_handler.call()
# end
# TODO: raise instead of return
nothing
end
function receive_response(sock, future::MsgPackRpcClientFuture.Future; interval = DEFAULT_INTERVAL)
unpacked_data = {}
timeout = future.timeout
while 0 <= timeout
receive_data(sock, future)
# if future.is_set == false
# println("continue")
# sleep(1)
# continue
# end
unpacked_data = MsgPack.unpack(future.raw)
if unpacked_data[1] != RESPONSE || unpacked_data[2] != future.msg_id # type, msgid
timeout -= interval
sleep(interval)
continue
else
break
end
return nothing
end
if unpacked_data[3] != nothing # error
future.error = unpacked_data[3]
end
if unpacked_data[4] != nothing #result
future.result = unpacked_data[4]
end
future
end
function receive_data(sock, future::MsgPackRpcClientFuture.Future)
join(sock, future)
nothing
end
function join(sock, future::MsgPackRpcClientFuture.Future; interval = DEFAULT_INTERVAL)
timeout = future.timeout
while future.is_set == false && timeout > 0
future.raw = readavailable(sock)
if length(future.raw) > 0
future.is_set = true
break
end
sleep(interval)
timeout -= interval
end
future
end
end # module MsgPackRpcClient
|
proofpile-julia0005-42676 | {
"provenance": "014.jsonl.gz:242677"
} | using SafeTestsets
@safetestset "Unit tests" begin
include("unittest.jl")
end
@safetestset "End to end test" begin
include("full.jl")
end
|
proofpile-julia0005-42677 | {
"provenance": "014.jsonl.gz:242678"
} | # Tensor product basis
# ------------------------------------------------------------------------------------------
struct TensorProductBasis{B1 <: BasisSet, B2 <: BasisSet} <: BasisSet
b1 :: B1
b2 :: B2
end
Base.length(b::TensorProductBasis) = length(b.b1) * length(b.b2)
leftindex(b::TensorProductBasis, idx::Integer) = rem(idx - 1, length(b.b1)) + 1
rightindex(b::TensorProductBasis, idx::Integer) = div(idx - 1, length(b.b1)) + 1
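# A product-basis index idx corresponds to the pair (i1, i2) through
# idx = (i2 - 1) * length(b.b1) + i1; `leftindex` and `rightindex` invert this map.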
struct ProductExtensionOperator{B1 <: BasisSet, B2 <: BasisSet, O1 <: LinearOperator{B1}, O2 <: LinearOperator{B2}} <: LinearOperator{TensorProductBasis{B1, B2}}
tpb :: TensorProductBasis{B1, B2}
op1 :: O1
op2 :: O2
end
basis(op::ProductExtensionOperator) = op.tpb
function apply(i::Integer, op::ProductExtensionOperator, j::Integer)
i1, i2 = leftindex(op.tpb, i), rightindex(op.tpb, i)
j1, j2 = leftindex(op.tpb, j), rightindex(op.tpb, j)
apply(i1, op.op1, j1) * apply(i2, op.op2, j2)
end
# Tensor sum basis
# ------------------------------------------------------------------------------------------
struct TensorSumBasis{B1 <: BasisSet, B2 <: BasisSet} <: BasisSet
b1 :: B1
b2 :: B2
end
Base.length(b::TensorSumBasis) = length(b.b1) + length(b.b2)
struct SumExtensionOperator{B1 <: BasisSet,B2 <: BasisSet,O1 <: LinearOperator{B1}, O2 <: LinearOperator{B2}} <: LinearOperator{TensorSumBasis{B1, B2}}
tsb :: TensorSumBasis{B1, B2}
op1 :: O1
op2 :: O2
end
basis(op::SumExtensionOperator) = op.tsb
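# `apply` realizes the block-diagonal structure of the direct sum: an entry is
# nonzero only when both indices fall in the same summand; cross-block entries vanish.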
function apply(i::Integer, op::SumExtensionOperator, j::Integer) :: ComplexF64
N1 = length(op.tsb.b1)
if i <= N1 && j <= N1
apply(i, op.op1, j)
elseif i > N1 && j > N1
apply(i - N1, op.op2, j - N1)
else
0.0
end
end
|
proofpile-julia0005-42678 | {
"provenance": "014.jsonl.gz:242679"
} | # # Julia Crash Course
# Hello! This is a quick intro to programming in Julia to help you hit the ground running with the _12 Steps to Navier–Stokes_.
# There are two ways to enjoy these lessons with Julia:
# 1. You can download and install a Julia distribution on your computer. Our recommendation is via [julialang.org](julialang.org). Open them up in your favorite IDE and run them one at a time.
# 2. You can run Julia in a Pluto notebook with `using Pluto; Pluto.run()`.
# In either case, you will probably want to download a copy of this notebook, or the whole collection of notebooks. We recommend that you then follow along with each lesson, experimenting with the code in the notebooks, or typing the code into a separate Julia interactive session.
#If you decide to work on your local Julia installation, you will have to navigate in the terminal to the folder that contains the `.jl` files. Then, to launch the notebook server, just type:
#`julia> using Pluto; Pluto.run()`
#You will get a new browser window or tab with a list of the notebooks available in that folder. Click on one and start working!
## Libraries
#Julia is a high-level open-source language. But the _Julia world_ is inhabited by many packages or libraries that provide useful things like gpu kernels, plotting functions, and much more. We can import libraries of functions to expand the capabilities of Python in our programs.
#OK! We'll start by importing a few libraries to help us out. First: our favorite library is **NumPy**, ... Just kidding! We don't need NumPy. We will need a 2D plotting library, which we will use to plot our results.
#The following code will be at the top of most of your programs, so execute this cell first:
#```julia
## <-- comments in Julia are denoted by the pound sign, like this one
#using LinearAlgebra # we import the array library
#using Plots # import plotting library
#```
#We are importing one library named `LinearAlgebra` and we are importing a package called `Plots`.
#To use a function belonging to one of these libraries, we have to tell Julia where to look for it. For that, each function name is written following the library name, with a dot in between.
#So if we want to be explicit about where a function comes from, we can put the library name in front of it, separated by a dot:
#
#
myarray = [ 1 2; 3 4 ]
myarray
LinearAlgebra.norm(myarray)
# ≈ 5.47722
# In this tutorial, it's not too important to know where functions are defined, since `using LinearAlgebra` brings all of its exported names into scope, but if
# you were developing your own code to share with others, some additional details about namespacing could save you some headaches.
## Variables
# Julia doesn't require explicitly declared variable types like C and other languages.
a = 5 #a is an integer 5
b = "five" #b is a string of the word "five'
c = 5.0 #c is a floating point 5
typeof(a)
typeof(b)
typeof(c)
# Note that dividing an integer by an integer with `/` yields a float. If you want integer division, use the `div` operator (or its unicode alias `÷`).
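#For example:
14 / 3 # `/` always returns a float: 4.666666666666667
div(14, 3) # integer (truncated) division: 4
14 ÷ 3 # `÷` (typed as \div<TAB>) is the same as div: 4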
## Whitespace in Julia
#
# Doesn't exist! Unlike Python, indentation carries no syntactic meaning in Julia; blocks are simply closed with `end`.
#
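#For example, the body of this `if` block is delimited by `end`, not by indentation:
if 2 > 1
println("two is bigger than one") # the (lack of) indentation here is purely cosmetic
end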
## Slicing Arrays
#In Julia, you can look at portions of arrays in the same way as in `Matlab`, with a few extra tricks thrown in. Let's take an array of values from 1 to 5.
myvals = [1, 2, 3, 4, 5]
#Julia uses a **one-based index**, so let's look at the first and last element in the array `myvals`
myvals[1], myvals[5]
#Note that indexing with a range, like `myvals[1:3]`, is inclusive on both the front end and the back end.
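#For example:
myvals[2:4] # a range slice returns the 3-element array [2, 3, 4]
myvals[end] # `end` always refers to the last index, so this returns 5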
## Assigning Array Variables
# One of the strange little quirks/features in Julia that often confuses people comes up when assigning and comparing arrays of values.
# Here is a quick example. Let's start by defining a 1-D array called $a$:
a = collect(range(start = 1.0, stop = 5.0, length = 5))
#OK, so we have an array $a$, with the values 1 through 5. I want to make a copy of that array, called $b$, so I'll try the following:
b = a
# Great. So $a$ has the values 1 through 5 and now so does $b$.
# Now that I have a backup of $a$, I can change its values without worrying about losing data (or so I may think!).
a[2] = 17
a
#Here, the 2nd element of $a$ has been changed to 17. Now let's check on $b$.
b
# And that's how things go wrong! When you use a statement like $b = a$, rather than copying all the values of $a$ into a new array called $b$,
# Julia just binds the name $b$ to the very same array as $a$ (an alias), so both names route to the same data.
# So if we change a value in $a$ then $b$ will reflect that change (technically, this is called *assignment by reference*).
# If you want to make a true copy of the array, you have to tell Julia to copy every element of $a$ into a new array. Let's call it $c$.
c = copy(a)
# Now, we can try again to change a value in $a$ and see if the changes are also seen in $c$.
a[2] = 3
a
c
# OK, it worked! If the difference between `b = a` and `c = copy(a)` is unclear, you should read through this again. This issue will come back to haunt you otherwise.
## Learn More
# There are a lot of resources online to learn more about using Julia and other libraries.
# Just for kicks, here we use Pluto's feature for embedding videos to point you to a short video on YouTube on using NumPy arrays.
## TODO
#from IPython.display import YouTubeVideo
# a short video about using NumPy arrays, from Enthought
#YouTubeVideo('vWkb7VahaXQ')
|
proofpile-julia0005-42679 | {
"provenance": "014.jsonl.gz:242680"
} |
function surf(gr::Graph, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.surf(gr.ptr, Data(z).ptr, stl, opt)
end
function surf(gr::Graph, x::Array{cmgl.Float}, y::Array{cmgl.Float}, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.surf_xy(gr.ptr, Data(x).ptr, Data(y).ptr, Data(z).ptr, stl, opt)
end
function mesh(gr::Graph, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.mesh(gr.ptr, Data(z).ptr, stl, opt)
end
function mesh(gr::Graph, x::Array{cmgl.Float, 2}, y::Array{cmgl.Float, 2}, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.mesh_xy(gr.ptr, Data(x).ptr, Data(y).ptr, Data(z).ptr, stl, opt)
end
function fall(gr::Graph, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.fall(gr.ptr, Data(z).ptr, stl, opt)
end
function fall(gr::Graph, x::Array{cmgl.Float, 2}, y::Array{cmgl.Float, 2}, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.fall_xy(gr.ptr, Data(x).ptr, Data(y).ptr, Data(z).ptr, stl, opt)
end
function belt(gr::Graph, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.belt(gr.ptr, Data(z).ptr, stl, opt)
end
function belt(gr::Graph, x::Array{cmgl.Float, 2}, y::Array{cmgl.Float, 2}, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.belt_xy(gr.ptr, Data(x).ptr, Data(y).ptr, Data(z).ptr, stl, opt)
end
function boxs(gr::Graph, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.boxs(gr.ptr, Data(z).ptr, stl, opt)
end
function boxs(gr::Graph, x::Array{cmgl.Float, 2}, y::Array{cmgl.Float, 2}, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.boxs_xy(gr.ptr, Data(x).ptr, Data(y).ptr, Data(z).ptr, stl, opt)
end
function tile(gr::Graph, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.tile(gr.ptr, Data(z).ptr, stl, opt)
end
function tile(gr::Graph, x::Array{cmgl.Float, 2}, y::Array{cmgl.Float, 2}, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.tile_xy(gr.ptr, Data(x).ptr, Data(y).ptr, Data(z).ptr, stl, opt)
end
function dens(gr::Graph, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.dens(gr.ptr, Data(z).ptr, stl, opt)
end
function dens(gr::Graph, x::Array{cmgl.Float, 2}, y::Array{cmgl.Float, 2}, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.dens_xy(gr.ptr, Data(x).ptr, Data(y).ptr, Data(z).ptr, stl, opt)
end
function cont(gr::Graph, v::Array{cmgl.Float, 1}, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.cont_val(gr.ptr, Data(v).ptr, Data(z).ptr, stl, opt)
end
function cont(gr::Graph, v::Array{cmgl.Float, 1}, x::Array{cmgl.Float, 2}, y::Array{cmgl.Float, 2}, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.cont_xy_val(gr.ptr, Data(v).ptr, Data(x).ptr, Data(y).ptr, Data(z).ptr, stl, opt)
end
function cont(gr::Graph, v::cmgl.Float, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.cont_val(gr.ptr, Data(cmgl.Float[v]).ptr, Data(z).ptr, stl, opt)
end
function cont(gr::Graph, v::cmgl.Float, x::Array{cmgl.Float, 2}, y::Array{cmgl.Float, 2}, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.cont_xy_val(gr.ptr, Data(cmgl.Float[v]).ptr, Data(x).ptr, Data(y).ptr, Data(z).ptr, stl, opt)
end
function cont(gr::Graph, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.cont(gr.ptr, Data(z).ptr, stl, opt)
end
function cont(gr::Graph, x::Array{cmgl.Float, 2}, y::Array{cmgl.Float, 2}, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.cont_xy(gr.ptr, Data(x).ptr, Data(y).ptr, Data(z).ptr, stl, opt)
end
function contf(gr::Graph, v::Array{cmgl.Float, 1}, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.contf_val(gr.ptr, Data(v).ptr, Data(z).ptr, stl, opt)
end
function contf(gr::Graph, v::Array{cmgl.Float, 1}, x::Array{cmgl.Float, 2}, y::Array{cmgl.Float, 2}, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.contf_xy_val(gr.ptr, Data(v).ptr, Data(x).ptr, Data(y).ptr, Data(z).ptr, stl, opt)
end
function contf(gr::Graph, v::cmgl.Float, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.contf_val(gr.ptr, Data(cmgl.Float[v]).ptr, Data(z).ptr, stl, opt)
end
function contf(gr::Graph, v::cmgl.Float, x::Array{cmgl.Float, 2}, y::Array{cmgl.Float, 2}, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.contf_xy_val(gr.ptr, Data(cmgl.Float[v]).ptr, Data(x).ptr, Data(y).ptr, Data(z).ptr, stl, opt)
end
function contf(gr::Graph, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.contf(gr.ptr, Data(z).ptr, stl, opt)
end
function contf(gr::Graph, x::Array{cmgl.Float, 2}, y::Array{cmgl.Float, 2}, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.contf_xy(gr.ptr, Data(x).ptr, Data(y).ptr, Data(z).ptr, stl, opt)
end
function contd(gr::Graph, v::Array{cmgl.Float, 1}, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.contd_val(gr.ptr, Data(v).ptr, Data(z).ptr, stl, opt)
end
function contd(gr::Graph, v::Array{cmgl.Float, 1}, x::Array{cmgl.Float, 2}, y::Array{cmgl.Float, 2}, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.contd_xy_val(gr.ptr, Data(v).ptr, Data(x).ptr, Data(y).ptr, Data(z).ptr, stl, opt)
end
function contd(gr::Graph, v::cmgl.Float, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.contd_val(gr.ptr, Data(cmgl.Float[v]).ptr, Data(z).ptr, stl, opt)
end
function contd(gr::Graph, v::cmgl.Float, x::Array{cmgl.Float, 2}, y::Array{cmgl.Float, 2}, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.contd_xy_val(gr.ptr, Data(cmgl.Float[v]).ptr, Data(x).ptr, Data(y).ptr, Data(z).ptr, stl, opt)
end
function contd(gr::Graph, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.contd(gr.ptr, Data(z).ptr, stl, opt)
end
function contd(gr::Graph, x::Array{cmgl.Float, 2}, y::Array{cmgl.Float, 2}, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.contd_xy(gr.ptr, Data(x).ptr, Data(y).ptr, Data(z).ptr, stl, opt)
end
function contv(gr::Graph, v::Array{cmgl.Float, 1}, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.contv_val(gr.ptr, Data(v).ptr, Data(z).ptr, stl, opt)
end
function contv(gr::Graph, v::Array{cmgl.Float, 1}, x::Array{cmgl.Float, 2}, y::Array{cmgl.Float, 2}, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.contv_xy_val(gr.ptr, Data(v).ptr, Data(x).ptr, Data(y).ptr, Data(z).ptr, stl, opt)
end
function contv(gr::Graph, v::cmgl.Float, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.contv_val(gr.ptr, Data(cmgl.Float[v]).ptr, Data(z).ptr, stl, opt)
end
function contv(gr::Graph, v::cmgl.Float, x::Array{cmgl.Float, 2}, y::Array{cmgl.Float, 2}, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.contv_xy_val(gr.ptr, Data(cmgl.Float[v]).ptr, Data(x).ptr, Data(y).ptr, Data(z).ptr, stl, opt)
end
function contv(gr::Graph, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.contv(gr.ptr, Data(z).ptr, stl, opt)
end
function contv(gr::Graph, x::Array{cmgl.Float, 2}, y::Array{cmgl.Float, 2}, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.contv_xy(gr.ptr, Data(x).ptr, Data(y).ptr, Data(z).ptr, stl, opt)
end
function axial(gr::Graph, v::Array{cmgl.Float, 1}, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.axial_val(gr.ptr, Data(v).ptr, Data(z).ptr, stl, opt)
end
function axial(gr::Graph, v::Array{cmgl.Float, 1}, x::Array{cmgl.Float, 2}, y::Array{cmgl.Float, 2}, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.axial_xy_val(gr.ptr, Data(v).ptr, Data(x).ptr, Data(y).ptr, Data(z).ptr, stl, opt)
end
function axial(gr::Graph, v::cmgl.Float, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.axial_val(gr.ptr, Data(cmgl.Float[v]).ptr, Data(z).ptr, stl, opt)
end
function axial(gr::Graph, v::cmgl.Float, x::Array{cmgl.Float, 2}, y::Array{cmgl.Float, 2}, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.axial_xy_val(gr.ptr, Data(cmgl.Float[v]).ptr, Data(x).ptr, Data(y).ptr, Data(z).ptr, stl, opt)
end
function axial(gr::Graph, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.axial(gr.ptr, Data(z).ptr, stl, opt)
end
function axial(gr::Graph, x::Array{cmgl.Float, 2}, y::Array{cmgl.Float, 2}, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.axial_xy(gr.ptr, Data(x).ptr, Data(y).ptr, Data(z).ptr, stl, opt)
end
function grid(gr::Graph, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.grid(gr.ptr, Data(z).ptr, stl, opt)
end
function grid(gr::Graph, x::Array{cmgl.Float}, y::Array{cmgl.Float}, z::Array{cmgl.Float, 2}; stl::String="", opt::String="")
cmgl.grid_xy(gr.ptr, Data(x).ptr, Data(y).ptr, Data(z).ptr, stl, opt)
end
|
proofpile-julia0005-42680 | {
"provenance": "014.jsonl.gz:242681"
} | __precompile__(true)
"""
Pandoc
Pandoc wrapper to read JSON AST from `pandoc`
See https://hackage.haskell.org/package/pandoc-types-1.17.5.4/docs/Text-Pandoc-Definition.html
"""
module Pandoc
import JSON
import Markdown
const PANDOC_JL_EXECUTABLE = get(ENV, "PANDOC_JL_EXECUTABLE", "pandoc")
@enum Alignment AlignLeft=1 AlignRight=2 AlignCenter=3 AlignDefault=4
@enum ListNumberStyle DefaultStyle=1 Example=2 Decimal=3 LowerRoman=4 UpperRoman=5 LowerAlpha=6 UpperAlpha=7
@enum ListNumberDelim DefaultDelim=1 Period=2 OneParen=3 TwoParens=4
@enum QuoteType SingleQuote=1 DoubleQuote=2
@enum MathType DisplayMath=1 InlineMath=2
@enum CitationMode AuthorInText=1 SuppressAuthor=2 NormalCitation=3
abstract type Element end
abstract type Inline <: Element end
abstract type Block <: Element end
const Format = String
const TableCell = Vector{Block}
struct Attributes
identifier::String
classes::Vector{String}
attributes::Vector{Pair{String, String}}
end
function Attributes(identifier, classes, attributes::Vector{Any})
attributes = Pair{String, String}[attr[1] => attr[2] for attr in attributes]
return Attributes(identifier, classes, attributes)
end
Attributes() = Attributes("", [], [],)
struct Citation
mode::CitationMode
prefix::Vector{Inline}
hash::Int
id::String
suffix::Vector{Inline}
note_number::Int
end
struct ListAttributes
number::Int
style::ListNumberStyle
delim::ListNumberDelim
end
"""Plain text, not a paragraph"""
struct Plain <: Block
content:: Vector{Inline}
end
"""Paragraph"""
struct Para <: Block
content::Vector{Inline}
end
"""Multiple non-breaking lines"""
struct LineBlock <: Block
content::Vector{Vector{Inline}}
end
"""Code block (literal) with attributes"""
struct CodeBlock <: Block
attr::Attributes
content::String
end
CodeBlock(content) = CodeBlock(Attributes(), content)
"""Raw block"""
struct RawBlock <: Block
format::Format
content::String
end
"""Block quote (list of blocks)"""
struct BlockQuote <: Block
content::Vector{Block}
end
"""Ordered list (attributes and a list of items, each a list of blocks)"""
struct OrderedList <: Block
attr::ListAttributes
content::Vector{Vector{Block}}
end
"""Bullet list (list of items, each a list of blocks)"""
struct BulletList <: Block
content::Vector{Vector{Block}}
end
"""
Definition list. Each list item is a pair consisting of a term (a list of inlines) and one or more definitions (each a list of blocks)"""
struct DefinitionList <: Block
content::Vector{Pair{Vector{Inline}, Vector{Vector{Block}}}}
end
"""Header - level (integer) and text (inlines)"""
struct Header <: Block
level::Int
attr::Attributes
content::Vector{Element}
end
Header(level) = Header(level, Attributes(), [])
Header(level, content) = Header(level, Attributes(), content)
"""Horizontal rule"""
struct HorizontalRule <: Block end
"""Table, with caption, column alignments (required), relative column widths (0 = default), column headers (each a list of blocks), and rows (each a list of lists of blocks)"""
struct Table <: Block
content::Vector{Inline}
alignments::Vector{Alignment}
widths::Vector{Float64}
headers::Vector{TableCell}
rows::Vector{Vector{TableCell}}
end
"""Generic block container with attributes"""
struct Div <: Block
attr::Attributes
content::Vector{Block}
end
Div(content) = Div(Attributes(), content)
struct Null <: Block end
struct Target
url::String
title::String
end
"""Text (string)"""
struct Str <: Inline
content::String
end
"""Emphasized text (list of inlines)"""
struct Emph <: Inline
content::Vector{Inline}
end
"""Strongly emphasized text (list of inlines)"""
struct Strong <: Inline
content::Vector{Inline}
end
"""Strikeout text (list of inlines)"""
struct Strikeout <: Inline
content::Vector{Inline}
end
"""Superscripted text (list of inlines)"""
struct Superscript <: Inline
content::Vector{Inline}
end
"""Subscripted text (list of inlines)"""
struct Subscript <: Inline
content::Vector{Inline}
end
"""Small caps text (list of inlines)"""
struct SmallCaps <: Inline
content::Vector{Inline}
end
"""Quoted text (list of inlines)"""
struct Quoted <: Inline
quote_type::QuoteType
content::Vector{Inline}
end
"""Citation (list of inlines)"""
struct Cite <: Inline
citations::Vector{Citation}
content::Vector{Inline}
end
"""Inline code (literal)"""
struct Code <: Inline
attr::Attributes
content::String
end
Code(content) = Code(Attributes(), content)
"""Inter-word space"""
struct Space <: Inline end
"""Soft line break"""
struct SoftBreak <: Inline end
"""Hard line break"""
struct LineBreak <: Inline end
"""TeX math (literal)"""
struct Math <: Inline
math_type::MathType
content::String
end
"""Raw inline"""
struct RawInline <: Inline
format::Format
content::String
end
"""Hyperlink: alt text (list of inlines), target"""
struct Link <: Inline
attr::Attributes
content::Vector{Inline}
target::Target
end
Link(content, target) = Link(Attributes(), content, target)
"""Image: alt text (list of inlines), target"""
struct Image <: Inline
attr::Attributes
content::Vector{Inline}
target::Target
end
Image(content, target) = Image(Attributes(), content, target)
"""Footnote or endnote"""
struct Note <: Inline
content::Vector{Block}
end
"""Generic inline container with attributes"""
struct Span <: Inline
attr::Attributes
content::Vector{Inline}
end
Span(content) = Span(Attributes(), content)
struct Unknown
e
t
end
pandoc_api_version() = v"1.17.5-4"
pandoc_api_version(v) = pandoc_api_version(v, Val(length(v)))
pandoc_api_version(v, length::Val{0}) = error("Version array has to be length > 0 but got `$v` instead")
pandoc_api_version(v, length::Val{1}) = VersionNumber(v[1])
pandoc_api_version(v, length::Val{2}) = VersionNumber(v[1], v[2])
pandoc_api_version(v, length::Val{3}) = VersionNumber(v[1], v[2], v[3])
pandoc_api_version(v, length) = VersionNumber(v[1], v[2], v[3], tuple(v[4:end]...))
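# The version array from the JSON AST maps onto a VersionNumber, e.g.
# pandoc_api_version([1, 17, 5, 4]) == v"1.17.5-4"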
mutable struct Document
data::Dict{String, Any}
pandoc_api_version::VersionNumber
meta::Dict{String, Any}
blocks::Vector{Element}
end
function Document(data)
pav = pandoc_api_version(data["pandoc-api-version"])
meta = data["meta"]
blocks = get_elements(data["blocks"])
return Document(data, pav, meta, blocks)
end
function get_element(t, e)
u = Unknown(e, t)
@warn "Unknown element parsed: $u"
return u
end
get_element(::Type{Null}, e) = Null()
get_element(::Type{SoftBreak}, e) = SoftBreak()
get_element(::Type{LineBreak}, e) = LineBreak()
get_element(::Type{HorizontalRule}, e) = HorizontalRule()
get_element(::Type{Space}, e) = Space()
get_element(::Type{Str}, e) = Str(e["c"])
function get_element(::Type{LineBlock}, e)
content = Vector{Inline}[]
for inlines in e["c"]
push!(content, Inline[get_element(i) for i in inlines])
end
return LineBlock(content)
end
function get_element(::Type{Superscript}, e)
return Superscript(Inline[get_element(i) for i in e["c"]])
end
function get_element(::Type{Subscript}, e)
return Subscript(Inline[get_element(i) for i in e["c"]])
end
function get_element(::Type{Note}, e)
return Note(Block[get_element(i) for i in e["c"]])
end
function get_element(::Type{Math}, e)
math_type = eval(Symbol(e["c"][1]["t"]))
content = e["c"][2]::String
return Math(math_type, content)
end
function get_element(::Type{RawInline}, e)
format = e["c"][1]::String
content = e["c"][2]::String
return RawInline(format, content)
end
function get_element(::Type{RawBlock}, e)
format = e["c"][1]::String
content = e["c"][2]::String
return RawBlock(format, content)
end
function get_element(::Type{Quoted}, e)
quote_type = eval(Symbol(e["c"][1]["t"]))
content = Inline[get_element(i) for i in e["c"][2]]
return Quoted(quote_type, content)
end
function get_element(::Type{Table}, e)
c = e["c"]
content = Inline[get_element(i) for i in c[1]]
alignments = Alignment[ eval(Symbol(a["t"])) for a in c[2] ]
widths = Float64[w for w in c[3]]
headers = TableCell[]
rows = Vector{TableCell}[]
for blocks in c[4]
push!(headers, Block[ get_element(b) for b in blocks ])
end
for list_of_blocks in c[5]
row = TableCell[]
for blocks in list_of_blocks
push!(row, Block[ get_element(b) for b in blocks ])
end
push!(rows, row)
end
return Table(content, alignments, widths, headers, rows)
end
function get_element(::Type{Span}, e)
return Span(
Attributes(e["c"][1]...),
Inline[get_element(i) for i in e["c"][2]]
)
end
function get_element(::Type{Image}, e)
attr = Attributes(e["c"][1]...)
content = Inline[get_element(i) for i in e["c"][2]]
target = Target(e["c"][3]...)
return Image(attr, content, target)
end
function get_element(::Type{Strikeout}, e)
return Strikeout(Inline[get_element(i) for i in e["c"]])
end
function get_element(::Type{SmallCaps}, e)
return SmallCaps(Inline[get_element(i) for i in e["c"]])
end
function get_element(::Type{Strong}, e)
return Strong(Inline[get_element(i) for i in e["c"]])
end
function get_element(::Type{DefinitionList}, e)
c = e["c"]
dl = Vector{Pair{Vector{Inline}, Vector{Vector{Block}}}}( [] )
for items in c
vector_blocks = Vector{Block}[]
for blocks in items[2]
push!(vector_blocks, Block[get_element(b) for b in blocks])
end
inlines = Inline[get_element(i) for i in items[1]]
push!(dl, (inlines => vector_blocks))
end
return DefinitionList(dl)
end
function get_element(::Type{Div}, e)
attr = Attributes(e["c"][1]...)
blocks = Block[get_element(b) for b in e["c"][2]]
return Div(attr, blocks)
end
function get_element(::Type{BlockQuote}, e)
blocks = Block[get_element(b) for b in e["c"]]
return BlockQuote(blocks)
end
function get_element(::Type{ListAttributes}, e)
number = e[1]
style = eval(Symbol(e[2]["t"]))
delim = eval(Symbol(e[3]["t"]))
return ListAttributes(number, style, delim)
end
function get_element(::Type{OrderedList}, e)
attr = get_element(ListAttributes, e["c"][1])
ol = Vector{Block}[]
for elements in e["c"][2]
sol = Block[]
for element in elements
el = get_element(element)
push!(sol, el)
end
push!(ol, sol)
end
return OrderedList(attr, ol)
end
function get_element(::Type{Citation}, e)
mode = eval(Symbol(e["citationMode"]["t"]))
prefix = Inline[get_element(i) for i in e["citationPrefix"]]
hash = e["citationHash"]::Int
id = e["citationId"]::String
suffix = Inline[get_element(i) for i in e["citationSuffix"]]
note_number = e["citationNoteNum"]::Int
return Citation(mode, prefix, hash, id, suffix, note_number)
end
function get_element(::Type{Cite}, e)
citations = Citation[get_element(Citation, c) for c in e["c"][1]]
inlines = Inline[get_element(i) for i in e["c"][2]]
return Cite(citations, inlines)
end
function get_element(::Type{Code}, e)
attr = Attributes(e["c"][1]...)
content = e["c"][2]
return Code(attr, content)
end
function get_element(::Type{Plain}, e)
content = Inline[]
for se in e["c"]
push!(content, get_element(se))
end
return Plain(content)
end
function get_element(::Type{CodeBlock}, e)
attr = Attributes(e["c"][1]...)
content = e["c"][2]::String
return CodeBlock(attr, content)
end
function get_element(::Type{BulletList}, e)
bl = Vector{Block}[]
for elements in e["c"]
sbl = Block[]
for element in elements
el = get_element(element)
push!(sbl, el)
end
push!(bl, sbl)
end
return BulletList(bl)
end
function get_element(::Type{Emph}, e)
return Emph(Element[get_element(se) for se in e["c"]])
end
function get_element(::Type{Para}, e)
return Para(Element[get_element(se) for se in e["c"]])
end
function get_element(::Type{Link}, e)
c = e["c"]
identifier = c[1][1]::String
classes = String[s for s in c[1][2]]
attributes = Pair[ (String(k) => String(v)) for (k,v) in c[1][3] ]
content = Element[]
for se in c[2]
push!(content, get_element(se))
end
target = Target(c[3][1], c[3][2])
return Link(Attributes(identifier, classes, attributes), content, target)
end
function get_element(::Type{Header}, e)
c = e["c"]
level = c[1]::Int
identifier = c[2][1]::String
classes = String[s for s in c[2][2]]
attributes = Pair[ (String(k) => String(v)) for (k,v) in c[2][3] ]
content = Element[]
for se in c[3]
push!(content, get_element(se))
end
return Header(level, Attributes(identifier, classes, attributes), content)
end
get_element(e) = get_element(Val{Symbol(e["t"])}(), e)
get_element(::Val{:Header}, e) = get_element(Header, e)
get_element(::Val{:Link}, e) = get_element(Link, e)
get_element(::Val{:Space}, e) = get_element(Space, e)
get_element(::Val{:Emph}, e) = get_element(Emph, e)
get_element(::Val{:Str}, e) = get_element(Str, e)
get_element(::Val{:Para}, e) = get_element(Para, e)
get_element(::Val{:HorizontalRule}, e) = get_element(HorizontalRule, e)
get_element(::Val{:BulletList}, e) = get_element(BulletList, e)
get_element(::Val{:Plain}, e) = get_element(Plain, e)
get_element(::Val{:Code}, e) = get_element(Code, e)
get_element(::Val{:CodeBlock}, e) = get_element(CodeBlock, e)
get_element(::Val{:LineBreak}, e) = get_element(LineBreak, e)
get_element(::Val{:Cite}, e) = get_element(Cite, e)
get_element(::Val{:BlockQuote}, e) = get_element(BlockQuote, e)
get_element(::Val{:OrderedList}, e) = get_element(OrderedList, e)
get_element(::Val{:DefinitionList}, e) = get_element(DefinitionList, e)
get_element(::Val{:Strong}, e) = get_element(Strong, e)
get_element(::Val{:SmallCaps}, e) = get_element(SmallCaps, e)
get_element(::Val{:Span}, e) = get_element(Span, e)
get_element(::Val{:Strikeout}, e) = get_element(Strikeout, e)
get_element(::Val{:Image}, e) = get_element(Image, e)
get_element(::Val{:Table}, e) = get_element(Table, e)
get_element(::Val{:SoftBreak}, e) = get_element(SoftBreak, e)
get_element(::Val{:Quoted}, e) = get_element(Quoted, e)
get_element(::Val{:RawInline}, e) = get_element(RawInline, e)
get_element(::Val{:RawBlock}, e) = get_element(RawBlock, e)
get_element(::Val{:Math}, e) = get_element(Math, e)
get_element(::Val{:Superscript}, e) = get_element(Superscript, e)
get_element(::Val{:Subscript}, e) = get_element(Subscript, e)
get_element(::Val{:Note}, e) = get_element(Note, e)
get_element(::Val{:LineBlock}, e) = get_element(LineBlock, e)
get_element(::Val{:Div}, e) = get_element(Div, e)
get_element(::Val{:Null}, e) = get_element(Null, e)
function get_elements(blocks)
elements = Element[]
for e in blocks
push!(elements, get_element(e))
end
return elements
end
function parse(markdown)
cmd = pipeline(`echo $markdown`, `$PANDOC_JL_EXECUTABLE -t json`)
data = read(cmd, String)
return Document(JSON.parse(data))
end
function parse_file(filename)
cmd = `$PANDOC_JL_EXECUTABLE -t json $filename`
data = read(cmd, String)
return Document(JSON.parse(data))
end
exists() = run(`which $PANDOC_JL_EXECUTABLE`, wait=false) |> success
### Convert
function Base.convert(::Type{Document}, md::Markdown.MD)
pav = v"1.17.5-4"
meta = Dict{String, Any}()
data = Dict{String, Any}()
blocks = Element[]
for e in md.content
push!(blocks, convert(Element, e))
end
return Document(data, pav, meta, blocks)
end
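# A minimal usage sketch (kept as a comment, not executed at include time): converting Julia's
# stdlib Markdown AST into a `Document` via the method above. The markdown snippet is a made-up example.
#
#   md  = Markdown.parse("# Title\n\nSome *emphasised* text with a [link](https://example.com).")
#   doc = convert(Document, md)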
Base.convert(::Type{Element}, e::AbstractString) = convert(Str, e)
Base.convert(::Type{Inline}, e::AbstractString) = convert(Str, e)
Base.convert(::Type{Str}, e::AbstractString) = Str(e)
Base.convert(::Type{Element}, e::Markdown.Header) = convert(Header, e)
function Base.convert(::Type{Header}, e::Markdown.Header{V}) where V
content = Element[]
for s in split(e.text[1])
push!(content, convert(Str, s))
push!(content, Space())
end
if length(content) > 0 && content[end] isa Space
pop!(content) # remove last Space()
end
return Header(
V #= level =#,
Attributes(
replace(lowercase(e.text[1]), " " => "-"),
[],
[]
),
content,
)
end
function _convert(::Type{Vector{Inline}}, text::AbstractString)
    content = Inline[]
    foreach(s -> push!(content, convert(Str, s), Space()), split(text))
    isempty(content) || pop!(content) # remove last Space()
    return content
end
Base.convert(::Type{Element}, e::Markdown.Link) = convert(Link, e)
Base.convert(::Type{Inline}, e::Markdown.Link) = convert(Link, e)
function Base.convert(::Type{Link}, e::Markdown.Link)
content = Inline[]
text = if e.text isa AbstractString
e.text
else
length(e.text) > 0 ? e.text[1] : ""
end
for s in split(text)
push!(content, convert(Str, s))
push!(content, Space())
end
isempty(content) || pop!(content) # remove last Space()
target = Target(e.url, "")
return Link(
Attributes(),
content,
target,
)
end
Base.convert(::Type{Element}, e::Markdown.Italic) = convert(Emph, e)
Base.convert(::Type{Inline}, e::Markdown.Italic) = convert(Emph, e)
Base.convert(::Type{Emph}, e::Markdown.Italic) = Emph(Inline[i for i in e.text])
Base.convert(::Type{Element}, e::Markdown.Bold) = convert(Strong, e)
Base.convert(::Type{Inline}, e::Markdown.Bold) = convert(Strong, e)
Base.convert(::Type{Strong}, e::Markdown.Bold) = Strong(Inline[i for i in e.text])
Base.convert(::Type{Element}, e::Markdown.Paragraph) = convert(Para, e)
function Base.convert(::Type{T}, e::Markdown.Paragraph) where T<:Union{Para,Plain}
content = Inline[]
for c in e.content
if c isa AbstractString
for s in split(c)
push!(content, convert(Str, s))
push!(content, Space())
end
if length(content) > 0 && content[end] isa Space
pop!(content) # remove last Space()
end
else
push!(content, c)
end
push!(content, Space())
end
if length(content) > 0 && content[end] isa Space
pop!(content) # remove last Space()
end
return T(content)
end
Base.convert(::Type{Element}, e::Markdown.HorizontalRule) = convert(HorizontalRule, e)
Base.convert(::Type{HorizontalRule}, e::Markdown.HorizontalRule) = HorizontalRule()
Base.convert(::Type{Element}, e::Markdown.LineBreak) = convert(LineBreak, e)
Base.convert(::Type{Inline}, e::Markdown.LineBreak) = convert(LineBreak, e)
Base.convert(::Type{LineBreak}, e::Markdown.LineBreak) = LineBreak()
Base.convert(::Type{Element}, e::Markdown.BlockQuote) = convert(BlockQuote, e)
Base.convert(::Type{Block}, e::Markdown.BlockQuote) = convert(BlockQuote, e)
function Base.convert(::Type{BlockQuote}, e::Markdown.BlockQuote)
content = Block[]
for c in e.content
if c isa Markdown.Paragraph
push!(content, convert(Para, c))
else
push!(content, c)
end
end
return BlockQuote(content)
end
Base.convert(::Type{Element}, e::Markdown.Code) = convert(CodeBlock, e)
Base.convert(::Type{Block}, e::Markdown.Code) = convert(CodeBlock, e)
Base.convert(::Type{CodeBlock}, e::Markdown.Code) = CodeBlock(Attributes("", [e.language], []), e.code)
Base.convert(::Type{Inline}, e::Markdown.Code) = convert(Code, e)
Base.convert(::Type{Code}, e::Markdown.Code) = Code(Attributes("", [e.language], []), e.code)
Base.convert(::Type{Element}, e::Markdown.List) = e.ordered == 1 ? convert(OrderedList, e) : convert(BulletList, e)
Base.convert(::Type{Block}, e::Markdown.List) = e.ordered == 1 ? convert(OrderedList, e) : convert(BulletList, e)
function Base.convert(::Type{BulletList}, e::Markdown.List)
content = Vector{Block}[]
for items in e.items
block = Block[]
for item in items
if item isa Markdown.Paragraph
push!(block, convert(Plain, item))
else
push!(block, item)
end
end
push!(content, block)
end
return BulletList(content)
end
function Base.convert(::Type{OrderedList}, e::Markdown.List)
content = Vector{Block}[]
for items in e.items
block = Block[]
for item in items
if item isa Markdown.Paragraph
push!(block, convert(Plain, item))
else
push!(block, item)
end
end
push!(content, block)
end
# always returns list level 1 with Decimal with Period
return OrderedList(ListAttributes(1, Pandoc.Decimal, Pandoc.Period), content)
end
Base.convert(::Type{Element}, e::Markdown.LaTeX) = convert(Inline, e)
Base.convert(::Type{Inline}, e::Markdown.LaTeX) = convert(Math, e)
Base.convert(::Type{Math}, e::Markdown.LaTeX) = Math(InlineMath, e.formula)
Base.convert(::Type{Inline}, e::Markdown.Image) = convert(Image, e)
Base.convert(::Type{Image}, e::Markdown.Image) = Image(Attributes(), [], Target(e.url, e.alt))
Base.convert(::Type{Element}, e::Markdown.Footnote) = convert(Inline, e)
Base.convert(::Type{Inline}, e::Markdown.Footnote) = convert(Note, e)
function Base.convert(::Type{Note}, e::Markdown.Footnote)
isnothing(e.text) && return Note([])
content = Block[]
for p in e.text
push!(content, p)
end
return Note(content)
end
### overloads
# fallback
compare(x, y) = false
function compare(x::T, y::T) where T
equality = true
for fieldname in fieldnames(T)
equality = equality && ( getfield(x, fieldname) == getfield(y, fieldname) )
end
return equality
end
Base.:(==)(x::T, y::T) where T<:Pandoc.Element = compare(x, y)
Base.:(==)(x::Attributes, y::Attributes) = compare(x, y)
Base.:(==)(x::Citation, y::Citation) = compare(x, y)
Base.:(==)(x::ListAttributes, y::ListAttributes) = compare(x, y)
Base.:(==)(x::Target, y::Target) = compare(x, y)
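# A small sketch of what the fieldwise `compare` fallback above buys us: value-like types such as
# `Target` or `Str` compare by content rather than by identity. The literals are made-up examples.
#
#   Target("https://example.com", "") == Target("https://example.com", "") # true
#   Str("word") == Str("word")                                             # true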
end # module
|
proofpile-julia0005-42681 | {
"provenance": "014.jsonl.gz:242682"
} | using Clang.wrap_c
include("constants.jl")
headers = [joinpath(LIB_FOLDER, "iaea_phsp.h")]
wc = wrap_c.init(
headers=headers,
output_file="raw.jl",
header_library = _ -> :IAEA_LIB,
)
wc.headers = headers
run(wc)
|
proofpile-julia0005-42682 | {
"provenance": "014.jsonl.gz:242683"
} | module GraphUtilities
using GenericTensorNetworks, Graphs
using JSON, DelimitedFiles
using Configurations, Random
using LegibleLambdas
using Requires
function __init__()
@require UnitDiskMapping="1b61a8d9-79ed-4491-8266-ef37f39e1727" using UnitDiskMapping
end
export GraphProblemConfig, problem_instance, foldername
export SmallGraphConfig, DiagGraphConfig, SquareGraphConfig, RegularGraphConfig, MISProjectGraphConfig, MappedRegularGraphConfig
export save_property, load_property
export unified_solve
include("config.jl")
include("fileio.jl")
include("json.jl")
include("verify.jl")
end
|
proofpile-julia0005-42683 | {
"provenance": "014.jsonl.gz:242684"
} | """
    build_variable_dictionary(data::Dict)

Builds the dictionary needed to construct the H matrix.
This dictionary allows the correct variables to be passed to each measurement function.
"""
function build_variable_dictionary(data::Dict)
bus_terms = [bus["terminals"] for (b, bus) in data["bus"]]
ref_bus = [bus["index"] for (b, bus) in data["bus"] if bus["bus_type"]==3]
@assert length(ref_bus) == 1 "There is more than one reference bus, double-check model"
ref_bus_term = data["bus"]["$(ref_bus[1])"]["terminals"]
total_vm = sum(length.(bus_terms))
total_va = sum(length.(bus_terms)) - length(ref_bus_term)
variable_dict = Dict{String, Any}()
variable_dict["vm"] = Dict{String, Any}()
variable_dict["va"] = Dict{String, Any}()
vmcount = 1
for (b, bus) in data["bus"]
variable_dict["vm"][b] = Dict{String, Any}()
for i in bus["terminals"]
variable_dict["vm"][b]["$i"] = Dict{String, Any}()
variable_dict["vm"][b]["$i"] = vmcount
vmcount+=1
end
end
vacount = total_vm+1
for (b, bus) in data["bus"]
if b != "$(ref_bus[1])"
variable_dict["va"][b] = Dict{String, Any}()
for i in bus["terminals"]
variable_dict["va"][b]["$i"] = Dict{String, Any}()
variable_dict["va"][b]["$i"] = vacount
vacount+=1
end
end
end
return variable_dict
end
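# Usage sketch (hypothetical two-bus network, kept as a comment): the `data` dictionary only needs
# the fields read above ("terminals", "index", "bus_type"), with exactly one reference bus (type 3).
#
#   data = Dict("bus" => Dict(
#       "1" => Dict("terminals" => [1, 2, 3], "index" => 1, "bus_type" => 3),  # reference bus
#       "2" => Dict("terminals" => [1, 2, 3], "index" => 2, "bus_type" => 1),
#   ))
#   variable_dict = build_variable_dictionary(data)
#   # -> 6 "vm" indices and 3 "va" indices; the reference bus gets no "va" entries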
function variables_of_buses(Bi::Array, variable_dict::Dict)
vm_indices = []
va_indices = []
for bus in Bi
push!(vm_indices, variable_dict["vm"]["$bus"])
if haskey(variable_dict["va"], "$bus") push!(va_indices, variable_dict["va"]["$bus"]) end # if it is a ref bus, the key does not exist
end
return vm_indices, va_indices
end
"""
Builds the multi-branch input for the injection measurement functions (injections can occur at buses with multiple
incoming/outgoing branches, unlike 'flows', which refer to a single branch).
"""
function build_multibranch_input(qmeas, meas, data, variable_dict)
cmp_bus = occursin("g", string(qmeas)) ? data["gen"]["$(meas["cmp_id"])"]["gen_bus"] : data["load"]["$(meas["cmp_id"])"]["load_bus"]
vm_cmp_bus = variable_dict["vm"]["$cmp_bus"]
va_cmp_bus = haskey(variable_dict["va"], "$cmp_bus") ? variable_dict["va"]["$cmp_bus"] : Dict{String, Any}() # is empty if cmp_bus is the ref bus
conn_branches = [branch for (_, branch) in data["branch"] if (branch["f_bus"] == cmp_bus || branch["t_bus"] == cmp_bus)]
g = []
b = []
G_fr = []
B_fr = []
t_buses = []
f_conn = []
t_conn = []
for branch in conn_branches
push!(g, _PMD.calc_branch_y(branch)[1])
push!(b, _PMD.calc_branch_y(branch)[2])
push!(G_fr, branch["g_fr"])
push!(B_fr, branch["b_fr"])
push!(t_buses, branch["t_bus"])
push!(t_buses, branch["f_bus"])
push!(f_conn, branch["f_connections"])
push!(t_conn, branch["t_connections"])
end
t_buses = filter(x-> x!=cmp_bus, t_buses) |> unique
vm_adj_bus, va_adj_bus = variables_of_buses(t_buses, variable_dict)
return cmp_bus, vm_cmp_bus, va_cmp_bus, conn_branches, f_conn, t_conn, g, b, G_fr, B_fr, t_buses, vm_adj_bus, va_adj_bus
end
"""
Builds the single-branch input for the flow measurement functions ('flows' refer to a single branch, as opposed to
injections, which can occur at buses with multiple incoming/outgoing branches).
"""
function build_singlebranch_input(meas, data, variable_dict)
branch = data["branch"]["$(meas["cmp_id"])"]
f_bus = branch["f_bus"]
t_bus = branch["t_bus"]
vm_fr = variable_dict["vm"]["$f_bus"]
va_fr = haskey(variable_dict["va"], "$f_bus") ? variable_dict["va"]["$f_bus"] : Dict{String, Any}() # is empty if cmp_bus is the ref bus
g, b = _PMD.calc_branch_y(branch)
G_fr = branch["g_fr"]
B_fr = branch["b_fr"]
vm_to, va_to = variables_of_buses([t_bus], variable_dict)
return f_bus, vm_fr, va_fr, [1], [branch["f_connections"]], [branch["t_connections"]], [g], [b], [G_fr], [B_fr], t_bus, vm_to, va_to
end
"""
Given a state estimation or power flow result (or similar) dictionary `sol_dict`, and the system variables `variable_dict`,
this function builds an array with the numerical state of the system, i.e., each variable in `variable_dict`
is assigned the scalar that corresponds to its value in `sol_dict`.
The resulting array has the same index order as that of `h_functions`.
"""
function build_state_array(sol_dict::Dict, variable_dict::Dict)
state_length = sum( length(val) for (_, val) in variable_dict["vm"]) + sum( length(val) for (_, val) in variable_dict["va"])
state_array = zeros(Float64, (state_length,) )
for (key, val) in variable_dict["vm"]
bus_idx = minimum([idx for (_, idx) in val])
for i in 1:length(val)
state_array[bus_idx+i-1] = sol_dict["solution"]["bus"][key]["vm"][i]
end
end
for (key, val) in variable_dict["va"]
bus_idx = minimum([idx for (_, idx) in val])
for i in 1:length(val)
state_array[bus_idx+i-1] = sol_dict["solution"]["bus"][key]["va"][i]
end
end
return state_array
end
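# Usage sketch (hypothetical values, continuing the two-bus example above): `sol_dict` only needs
# the "solution" => "bus" => <id> => "vm"/"va" entries read by this function.
#
#   sol_dict = Dict("solution" => Dict("bus" => Dict(
#       "1" => Dict("vm" => [1.0, 1.0, 1.0], "va" => [0.0, -2.09, 2.09]),
#       "2" => Dict("vm" => [0.99, 0.98, 0.99], "va" => [-0.01, -2.1, 2.08]),
#   )))
#   x = build_state_array(sol_dict, variable_dict)  # ordered exactly as in `variable_dict`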
"""
Adds "virtual" zero zib residuals to a state estimation solution dictionary `se_sol`.
The `data` dictionary must have measurement data for these zibs, which can be added
with _PMDSE.add_zib_virtual_meas!
"""
function add_zib_virtual_residuals!(se_sol::Dict, data::Dict)
original_meas = [m for (m, meas) in se_sol["solution"]["meas"]]
for (m, meas) in data["meas"]
if m ∉ original_meas
se_sol["solution"]["meas"][m] = Dict("res"=>zeros(length(meas["dst"])))
end
end
end |
proofpile-julia0005-42684 | {
"provenance": "014.jsonl.gz:242685"
} | # Autogenerated wrapper script for libsingular_julia_jll for x86_64-linux-musl-cxx03-julia_version+1.6.0
export libsingular_julia
using libcxxwrap_julia_jll
using Singular_jll
JLLWrappers.@generate_wrapper_header("libsingular_julia")
JLLWrappers.@declare_library_product(libsingular_julia, "libsingular_julia.so")
function __init__()
JLLWrappers.@generate_init_header(libcxxwrap_julia_jll, Singular_jll)
JLLWrappers.@init_library_product(
libsingular_julia,
"lib/libsingular_julia.so",
RTLD_LAZY | RTLD_DEEPBIND,
)
JLLWrappers.@generate_init_footer()
end # __init__()
|
proofpile-julia0005-42685 | {
"provenance": "014.jsonl.gz:242686"
} | a = VecE2()
@test typeof(a) <: VecE
@test typeof(a) <: AbstractVec
@test a.x == 0.0
@test a.y == 0.0
b = VecE2(0.0,0.0)
@test b.x == 0.0
@test b.y == 0.0
@test length(a) == 2
@test a == b
@test isequal(a, b)
@test copy(a) == a
@test convert(Vector{Float64}, a) == [0.0,0.0]
@test convert(VecE2, [0.0,0.0]) == a
@test isapprox(polar(1.0,0.0), VecE2(1.0,0.0))
@test isapprox(polar(2.0,π/2), VecE2(0.0,2.0))
@test isapprox(polar(3.0,-π/2), VecE2(0.0,-3.0))
@test isapprox(polar(-0.5,1.0*π), VecE2(0.5,0.0))
a = VecE2(0.0,1.0)
b = VecE2(0.5,2.0)
@test a != b
@test a + b == VecE2(0.5, 3.0)
@test a + 1 == VecE2(1.0, 2.0)
@test a + 0.5 == VecE2(0.5, 1.5)
@test a - b == VecE2(-0.5, -1.0)
@test a - 1 == VecE2(-1.0, 0.0)
@test a - 0.5 == VecE2(-0.5, 0.5)
@test a * 2 == VecE2(0.0, 2.0)
@test a * 0.5 == VecE2(0.0, 0.5)
@test a / 2 == VecE2(0.0, 0.5)
@test a / 0.5 == VecE2(0.0, 2.0)
@test b^2 == VecE2(0.25, 4.0)
@test b^0.5 == VecE2(0.5^0.5, 2.0^0.5)
@test a % 2.0 == VecE2(0.0, 1.0)
@test isfinite(a)
@test !isfinite(VecE2(Inf,0))
@test !isfinite(VecE2(0,Inf))
@test !isfinite(VecE2(0,-Inf))
@test !isfinite(VecE2(Inf,Inf))
@test !isinf(a)
@test isinf(VecE2(Inf,0))
@test isinf(VecE2(0,Inf))
@test isinf(VecE2(0,-Inf))
@test isinf(VecE2(Inf,Inf))
@test !isnan(a)
@test isnan(VecE2(NaN,0))
@test isnan(VecE2(0,NaN))
@test isnan(VecE2(NaN,NaN))
@test round(VecE2(0.25,1.75)) == VecE2(0.0,2.0)
@test round(VecE2(-0.25,-1.75)) == VecE2(-0.0,-2.0)
@test floor(VecE2(0.25,1.75)) == VecE2(0.0,1.0)
@test floor(VecE2(-0.25,-1.75)) == VecE2(-1.0,-2.0)
@test ceil(VecE2(0.25,1.75)) == VecE2(1.0,2.0)
@test ceil(VecE2(-0.25,-1.75)) == VecE2(-0.0,-1.0)
@test trunc(VecE2(0.25,1.75)) == VecE2(0.0,1.0)
@test trunc(VecE2(-0.25,-1.75)) == VecE2(-0.0,-1.0)
@test clamp(VecE2(1.0, 10.0), 0.0, 5.0) == VecE2(1.0, 5.0)
@test clamp(VecE2(-1.0, 4.0), 0.0, 5.0) == VecE2(0.0, 4.0)
c = VecE2(3.0,4.0)
@test isapprox(abs(a), 1.0)
@test isapprox(abs(c), 5.0)
@test isapprox(hypot(a), 1.0)
@test isapprox(hypot(c), 5.0)
@test isapprox(abs2(a), 1.0)
@test isapprox(abs2(c), 25.0)
@test isapprox(norm(a), VecE2(0.0,1.0))
@test isapprox(norm(b), VecE2(0.5/sqrt(4.25),2.0/sqrt(4.25)))
@test isapprox(norm(c), VecE2(3.0/5,4.0/5))
@test isapprox(dist(a,a), 0.0)
@test isapprox(dist(b,b), 0.0)
@test isapprox(dist(c,c), 0.0)
@test isapprox(dist(a,b), hypot(0.5, 1.0))
@test isapprox(dist(a,c), hypot(3.0, 3.0))
@test isapprox(dist2(a,a), 0.0)
@test isapprox(dist2(b,b), 0.0)
@test isapprox(dist2(c,c), 0.0)
@test isapprox(dist2(a,b), 0.5^2 + 1^2)
@test isapprox(dist2(a,c), 3^2 + 3^2)
@test isapprox(dot(a, b), 2.0)
@test isapprox(dot(b, c), 1.5 + 8.0)
@test isapprox(dot(c, c), abs2(c))
@test isapprox(proj(b, a, Float64), 2.0)
@test isapprox(proj(c, a, Float64), 4.0)
@test isapprox(proj(b, a, VecE2), VecE2(0.0, 2.0))
@test isapprox(proj(c, a, VecE2), VecE2(0.0, 4.0))
@test isapprox(lerp(VecE2(0,0), VecE2(2,-2), 0.25), VecE2(0.5,-0.5))
@test isapprox(lerp(VecE2(2,-2), VecE2(3,-3), 0.5), VecE2(2.5,-2.5))
@test isapprox(rot180(b), VecE2(-0.5,-2.0))
@test isapprox(rotr90(b), VecE2( 2.0,-0.5))
@test isapprox(rotl90(b), VecE2(-2.0, 0.5))
@test isapprox(rot(b, 1.0*pi), VecE2(-0.5,-2.0))
@test isapprox(rot(b, -π/2), VecE2( 2.0,-0.5))
@test isapprox(rot(b, π/2), VecE2(-2.0, 0.5))
@test isapprox(rot(b, 2π), b)
|
proofpile-julia0005-42686 | {
"provenance": "014.jsonl.gz:242687"
} |
#const XYIterGMMorLevel=Union{XYIterLevel, XYIterGMM}
iterloss(Θ::AbstractVolumePartsIter{TM,TV,T}, Xv::XYIterControl, RegressionType::Val) where {TM, TV, T} =
    sum((Θ(Xv, RegressionType) .- Xv.ṽ).^2)
function currentloss(Θ::AbstractVolumePartsIter{TM,TV,T}, Xv::XYIterControl,
absμₖ::TM) where {TM<:CuMatrix, TV<:CuVector, T}
someones::CuVector{T} = CUDA.ones(T, size(Θ.A,1))
return sum((absμₖ*someones .- Xv.ṽ).^2)
end
function testiterloss(Θ::AbstractVolumePartsIter{TM,TV,T}, Xv::XYIterControl;
RegressionType::Val=Val(:choleskyzygote)) where {TM, TV, T}
vhattest = iterloss(deepcopy(Θ), Xv, RegressionType)
vhatver = sum((Θ(Xv, RegressionType) .- Xv.ṽ) .^ 2)
@assert vhatver ≈ vhattest
end
function itermodel(panel::AbstractDataFrame, zs::ZSpec,::Val{:ols},
::Type{T}=PARAM[:itertype],
::Type{TV}=PARAM[:itergpu] ? CUDA.CuVector{T} : Vector{T},
::Type{TM}=PARAM[:itergpu] ? CUDA.CuMatrix{T} : Matrix{T},
PredictionType::Val = Val(PARAM[:predictiontype]),
RegressionType::Val = Val(PARAM[:iterregressiontype]),
SolveType::Val = Val(PARAM[:itersolvetype]),
; kwargs... #in case we want to use kwargs in other methods
) where {
T<:Real, TV<:AbstractVector{T}, TM<:AbstractMatrix{T}}
(T == Float32) && (PARAM[:minweightmagnitude]<10^(-7)) && @warn(
"T=$T while PARAM[:minweightmagnitude]=$(PARAM[:minweightmagnitude]).
Therefore the minweight is effectively 0 - is this really what you want?")
CUDA.allowscalar(false)
Zms::MeasureSpec = zs.Zms
Fw::Vector{Symbol} = Fweights(Zms, :Fw)
FRtw::Vector{Symbol} = Fweights(Zms, :FRtw)
FRLw::Vector{Symbol} = Fweights(Zms, :FRLw)
FW::Vector{Symbol} = Zms.FW
FWgroup = Fgroupedcontrols(Zms)
#form the data into a more usable format and expand the arrays
p = modelparts(panel, T,TV,TM,
weightdata = (;Fw, FRtw, FRLw), Fvol=Zms.Fvol; zs, FW, FWgroup)
dims = p.dims
tidx::Dict = p.tidx
cleanpanel::SubDataFrame = p.cleanpanel
Ncleanpanel::Int = nrow(cleanpanel)
ws::TM = p.dat[:Fw]
Rtws::TM = p.dat[:FRtw]
RLws::TM = p.dat[:FRLw]
W::TM = p.dat[:FW]
Wgroup = p.dat[:FWgroup]
ts::Vector{Int} = p.ts
v::TV = p.dat[:Fvol]
#println("ts: $ts")
Xv = XYIterControl(ws, Rtws, RLws, v, ts, W=W, Wgroup=Wgroup)
#construct the parameter
Θ = VolumePartsIter(dims.T, dims.K, ts, T)
local λ⁺::T = T(-1)
# for debugging purposes - will throw an error at some point if true
PARAM[:runstacktraceoniter] && solveA!(iterloss,Θ,Xv,SolveType)
#main optimization routine
try
λ⁺ = solveA!(iterloss,Θ,Xv,SolveType)
catch err
errmessage = "$err"
if length(errmessage) > 20_000 #sometimes we get really long error messages
errmessage = errmessage[1:20_000]
end
#@warn "Solver failed with error $errmessage"
@error "Solver failed with error $errmessage\n" exception=(err, catch_backtrace())
finally
results::DataFrame = formresults(panel, Zms, Θ, Xv, Val{:iter}())
#recompute the loss if needed
if λ⁺<0
throw("λ⁺ < 0!! This probably means the gpu failed. If thats not what happened
it shouldn't be too hard to fix up the code and allow for this scenario. If it is,
Just restart Julia and rerun.")
λ⁺ = updateA₁!(Θ.g|>TV|>deepcopy, iterloss, Θ, Xv, RegressionType, Val{:identity}())
updateAfromGAₜ!(Θ,Xv,1) #update to the final values of A
#currentloss(Θ, Xv, projectvolumefromA(Θ,Xv))
λ⁺ = lossfromA(Θ, Xv, RegressionType)
end
λ⁺ = isfinite(λ⁺) ? Int(round(min(λ⁺, typemax(Int)÷10))) : T(-99.0)
return λ⁺, results
end
@assert false #should never reach here due to the finalizer
end
function itermodel(panel::AbstractDataFrame, zs::ZSpec,::Val{:bootstrap},
::Type{T}=PARAM[:itertype],
::Type{TV}=PARAM[:iterbootstrapgpu] ? CUDA.CuVector{T} : Vector{T},
::Type{TM}=PARAM[:iterbootstrapgpu] ? CUDA.CuMatrix{T} : Matrix{T},
PredictionType::Val = Val(PARAM[:predictiontype]),
RegressionType::Val = Val(PARAM[:iterregressiontype]);
funds, kwargs...
) where {
T<:Real, TV<:AbstractVector{T}, TM<:AbstractMatrix{T}}
(T == Float32) && throw(
"If you really want to use Float32 using the bootstrap approach,
you can disable this.")
CUDA.allowscalar(false)
Zms::MeasureSpec = zs.Zms
Fw::Vector{Symbol} = Fweights(Zms, :Fw)
FRtw::Vector{Symbol} = Fweights(Zms, :FRtw)
FRLw::Vector{Symbol} = Fweights(Zms, :FRLw)
FW::Vector{Symbol} = Zms.FW
FWgroup = Fgroupedcontrols(Zms)
#form the data into a more usable format and expand the arrays
p = modelparts(panel, T,TV,TM,
weightdata = (;Fw, FRtw, FRLw), Fvol=Zms.Fvol; zs, FW, FWgroup)
dims = p.dims
tidx::Dict = p.tidx
cleanpanel::SubDataFrame = p.cleanpanel
Ncleanpanel::Int = nrow(cleanpanel)
ws::TM = p.dat[:Fw]
Rtws::TM = p.dat[:FRtw]
RLws::TM = p.dat[:FRLw]
W::TM = p.dat[:FW]
Wgroup = p.dat[:FWgroup]
ts::Vector{Int} = p.ts
v::TV = p.dat[:Fvol]
#println("ts: $ts")
Xv = XYIterControl(ws, Rtws, RLws, v, ts, W=W, Wgroup=Wgroup)
#construct the parameter
Θ = VolumePartsIter(dims.T, dims.K, ts, T, TM, TV)
G = extractGfromfunds(TM; funds, zs, tidx, ts)
println("dims G: $(size(G))
dims Θ.G: $(size(Θ.G))")
Θ.G .= G
local λ⁺::T = T(-1)
# for debugging purposes - will throw an error at some point if true
PARAM[:runstacktraceoniter] && solveA!(iterloss,Θ,Xv,
Val{:bootstrap}())
#main optimization routine
#try
λ⁺ = solveA!(iterloss,Θ,Xv,Val{:bootstrap}())
#=catch err
errmessage = "$err"
if length(errmessage) > 20_000 #sometimes we get really long error messages
errmessage = errmessage[1:20_000]
end
#@warn "Solver failed with error $errmessage"
@error "Solver failed with error $errmessage\n" exception=(err, catch_backtrace())
finally
results::DataFrame = formresults(panel, Zms, Θ, Xv, Val{:iter}())
#recompute the loss if needed
if λ⁺<0
throw("λ⁺ < 0!! This probably means the gpu failed. If thats not what happened
it shouldn't be too hard to fix up the code and allow for this scenario. If it is,
Just restart Julia and rerun.")
λ⁺ = updateA₁!(Θ.g|>TV|>deepcopy, iterloss, Θ, Xv, RegressionType, Val{:identity}())
updateAfromGAₜ!(Θ,Xv,1) #update to the final values of A
#currentloss(Θ, Xv, projectvolumefromA(Θ,Xv))
λ⁺ = lossfromA(Θ, Xv, RegressionType)
end=#
results::DataFrame = formresults(panel, Zms, Θ, Xv, Val{:iter}())
results = addfundcolstoresults(results; funds, zs, tidx, ts)
λ⁺ = isfinite(λ⁺) ? Int(round(min(λ⁺, typemax(Int)÷10))) : T(-99.0)
return λ⁺, results
#end
@assert false #should never reach here due to the finalizer
end
#extracts the G from the fund file
function extractGfromfunds(::Type{TM}; funds, zs, tidx, ts, fundsource=PARAM[:iterfundsource],
fundassetsprefix = PARAM[:iterfundassetprefix]) where TM <: AbstractMatrix
ms = zs.originalms
f = funds[fundsource] |> deepcopy
alldates = sort(keys(tidx) |> collect)
@assert issorted(f, :date)
funddates = f.date
#verify we have coverage of the growth rates over the estimation period
@assert (setdiff(alldates, funddates) |> isempty) "
setdiff(alldates,funddates): $(setdiff(alldates,funddates))"
cols = ms.Fξs .|> (s->Symbol(fundassetsprefix, s))
cleanfunds = completecases(f, cols)
A = f[f.date .|> (d->insorted(d, alldates)), cols] |> Matrix{Float64}
G = (A[2:end, :] ./ A[1:(end-1),:])' |> TM
#NOTE: this approach won't work with smoothing- but wouldn't smoothing
#break the accounting relationship?
return G
end
function addfundcolstoresults(results; funds, zs, tidx, ts, fundsource=PARAM[:iterfundsource],
fundassetsprefix = PARAM[:iterfundassetprefix],
additionalfundscols = PARAM[:iteradditionalfundscols])
ms = zs.originalms
f = funds[fundsource] |> deepcopy
alldates = sort(keys(tidx) |> collect)
@assert issorted(f, :date)
funddates = f.date
#verify we have coverage of the growth rates over the estimation period
@assert (setdiff(alldates, funddates) |> isempty) "
setdiff(alldates,funddates): $(setdiff(alldates,funddates))"
cols = [ms.Fξs .|> (s->Symbol(fundassetsprefix, s)); additionalfundscols]
cleanfunds = completecases(f, cols)
colstoadd = f[f.date .|> (d->insorted(d, alldates)), [:date; cols;]] |> deepcopy
newcolnames = cols .|> (s)->Symbol(fundsource, s)
rename!(colstoadd, Dict(cols .=> newcolnames))
results = leftjoin(results, colstoadd, on=:date, validate=(true,true))
#NOTE: this approach won't work with smoothing- but wouldn't smoothing
#break the accounting relationship?
return results
end
|
proofpile-julia0005-42687 | {
"provenance": "014.jsonl.gz:242688"
} | using Random
temp = zeros(Int64, 3, 2, 4)
for i in 1:3
for j in 1:2
for k in 1:4
temp[i,j,k] = i*j*k
end
end
end
temp
using HDF5
temp[:]
h5write("../DHC/scratch_AKS/order_test.h5", "main/data", temp[:])
temp = zeros(Int64, 3, 3)
for i in 1:3
for j in 1:3
temp[i,j] = i*(j+1)
end
end
temp
temp[:]
temp[1,2]
|
proofpile-julia0005-42688 | {
"provenance": "014.jsonl.gz:242689"
} | # exports
export create, Sphere, show
mutable struct Sphere <: Shape
radius::Float64
end
function Base.show(io::IO, x::Sphere)
println(io, "Sphere")
println(io, "Radius: $(x.radius)")
end
function create(c::Sphere; resolution=nothing, rand_=0.0, type::Int64=1)
radius = c.radius
bounds = [-radius radius; -radius radius; -radius radius]
x, v, y, vol = create(Cuboid(bounds), resolution=resolution, rand_=rand_)
mask = vec(sum(x.^2, dims=1) .<= radius^2)
mesh = x[:, mask]
# x, v, y, vol
return mesh, zeros(size(mesh)), copy(mesh), vol, type*ones(Int64, length(vol))
end |
proofpile-julia0005-42689 | {
"provenance": "014.jsonl.gz:242690"
} | include("dynamic_piomelli.jl")
@par function realspace_dynamic_les_calculation!(s::A) where {A<:@par(AbstractSimulation)}
real!(s.lesmodel.û)
real!(s.lesmodel.M)
if is_dynP_les(A)
real!(s.lesmodel.ŵ)
end
if is_piomelliSmag_les(A)
dt = get_dt(s)
dtm1 = s.lesmodel.dtm1[]
dtf = dt == 0.0 ? 1.0 : dt
dtm1f = dtm1 == 0.0 ? dtf : dtm1
if s.iteration[] == 0
copyto!(s.lesmodel.cm1.rr,s.lesmodel.c.rr)
end
calc_cstar!(s.lesmodel.c.rr,s.lesmodel.cm1.rr,dtf,dtm1f)
s.lesmodel.dtm1[] = dt
end
if is_piomelliP_les(A)
dt = get_dt(s)
dtm1 = s.lesmodel.dtpm1[]
dtf = dt == 0.0 ? 1.0 : dt
dtm1f = dtm1 == 0.0 ? dtf : dtm1
if s.iteration[] == 0
copyto!(s.lesmodel.cpm1.rr,s.lesmodel.cp.rr)
end
calc_cstar!(s.lesmodel.cp.rr,s.lesmodel.cpm1.rr,dtf,dtm1f)
s.lesmodel.dtpm1[] = dt
end
@mthreads for j in TRANGE
realspace_dynamic_les_calculation!(s,j)
end
return nothing
end
@par function realspace_dynamic_les_calculation!(s::A,j::Integer) where {A<:@par(AbstractSimulation)}
u = s.u.rr
L = s.lesmodel.L.rr
tau = s.lesmodel.tau.rr
Sa = s.lesmodel.S.rr
Δ² = s.lesmodel.Δ²
if is_piomelliSmag_les(A)
c = s.lesmodel.c.rr
end
if is_dynP_les(A)
w = s.rhs.rr
P = s.lesmodel.P.rr
if is_piomelliP_les(A)
cp = s.lesmodel.cp.rr
end
end
@inbounds @msimd for i in REAL_RANGES[j]
S = tau[i]
if is_piomelliSmag_les(A)
Sa[i] = 2*c[i]*Δ²*norm(S)*S
else
Sa[i] = Δ²*norm(S)*S
end
v = u[i]
L[i] = symouter(v,v)
if is_piomelliP_les(A)
P[i] = cp[i]*Δ²*Lie(S,AntiSymTen(0.5*w[i]))
elseif is_dynSandP_les(A)
P[i] = Δ²*Lie(S,AntiSymTen(0.5*w[i]))
end
end
return nothing
end
@par function fourierspace_dynamic_les_calculation!(s::A) where {A<:@par(AbstractSimulation)}
fullfourier!(s.lesmodel.S)
fullfourier!(s.lesmodel.L)
is_dynP_les(A) && fullfourier!(s.lesmodel.P)
ind = CartesianIndices((Base.OneTo(NY),Base.OneTo(NZ)))
@mthreads for jk in ind
j,k = Tuple(jk)
fourierspace_dynamic_les_calculation!(s,j,k)
end
real!(s.lesmodel.S)
real!(s.lesmodel.L)
is_dynP_les(A) && real!(s.lesmodel.P)
return nothing
end
@par function fourierspace_dynamic_les_calculation!(s::A,j::Integer, k::Integer) where {A<:@par(AbstractSimulation)}
S = s.lesmodel.S.c
L = s.lesmodel.L.c
Δ̂² = s.lesmodel.Δ̂²
if is_dynP_les(A)
P = s.lesmodel.P.c
end
@inbounds begin
@msimd for i in Base.OneTo(NX)
G = Gaussfilter(Δ̂²,i,j,k)
S[i,j,k] *= G
L[i,j,k] *= G
if is_dynP_les(A)
P[i,j,k] *= G
end
end
end
end
@par function coefficient_les_calculation!(s::A) where {A<:@par(AbstractSimulation)}
@mthreads for j in TRANGE
coefficient_les_calculation!(s,j)
end
s.lesmodel.avg && average_c!(s.lesmodel.c,s.lesmodel.Δ̂²)
if is_dynP_les(s)
s.lesmodel.avg && average_c!(s.lesmodel.cp,s.lesmodel.Δ̂²)
end
return nothing
end
@par function coefficient_les_calculation!(s::A,j::Integer) where {A<:@par(AbstractSimulation)}
rhs = s.rhs.rr
τ = s.lesmodel.tau.rr
Δ² = s.lesmodel.Δ²
if haspassivescalarles(A)
φ = s.passivescalar.φ.field.data
fφ = s.passivescalar.flux.rr
has_les_scalar_vorticity_model(A) && (cφ = s.passivescalar.lesmodel.c*Δ²)
end
if hasdensityles(A)
ρ = s.densitystratification.ρ.field.data
f = s.densitystratification.flux.rr
has_les_density_vorticity_model(A) && (cρ = s.densitystratification.lesmodel.c*Δ²)
end
ca = s.lesmodel.c.rr
û = s.lesmodel.û.rr
La = s.lesmodel.L.rr
Ma = s.lesmodel.M.rr
Sa = s.lesmodel.S.rr
Δ̂² = s.lesmodel.Δ̂² + Δ²
#cmin = s.lesmodel.cmin
if is_dynP_les(A)
Pa = s.lesmodel.P.rr
ŵ = s.lesmodel.ŵ.rr
cpa = s.lesmodel.cp.rr
end
@inbounds @msimd for i in REAL_RANGES[j]
w = rhs[i]
S = τ[i]
L = traceless(symouter(û[i],û[i]) - La[i])
La[i] = L
Sh = Ma[i]
if is_dynSmag_les(A)
M = Δ̂²*norm(Sh)*Sh - Sa[i]
#c = max(0.0,0.5*((L:M)/(M:M)))
c = max(-1.0,min(1.0,0.5*((L:M)/(M:M))))
elseif is_piomelliSmag_les(A)
τ̂ = Sa[i]
if is_piomelliP_les(A)
τ̂ += Pa[i]
end
ah = norm(Sh)
M = 2*Δ̂²*ah*Sh
Mn = M/(M:M)
c = (τ̂ + L):Mn
c = max(-1.0,min(1.0,c))
#c = ifelse(ah<10.0,0.0,max(0.0,c))
end
ca[i] = c
if is_dynP_les(A)
wh = ŵ[i]
Ph = Lie(Sh,AntiSymTen(0.5*wh))
if is_dynSandP_les(A)
Mp = Δ̂²*Ph - Pa[i]
cp = max(-1.0,min(1.0,(L:Mp)/(Mp:Mp))) # Should I clip negative values?
#cp = max(0.0,cp)
elseif is_piomelliP_les(A)
if !is_piomelliSmag_les(A)
τ̂ = Pa[i]
end
Mp = Δ̂²*Ph
Mpn = Mp/(Mp:Mp)
cp = max(-1.0,min(1.0,(τ̂ + L):Mpn))
end
cpa[i] = cp
end
end
return nothing
end
|
proofpile-julia0005-42690 | {
"provenance": "014.jsonl.gz:242691"
} | # Importing the physical constants:
constants = Constants()
T = 28.0 - constants.K₀ # Current temperature
Tᵣ = 25.0 - constants.K₀ # Reference temperature
A = Fvcb()
@testset "Γ_star()" begin
@test PlantBiophysics.Γ_star(T,Tᵣ,constants.R) ==
PlantBiophysics.arrhenius(42.75,37830.0,T,Tᵣ,constants.R)
end;
@testset "standard arrhenius()" begin
@test PlantBiophysics.arrhenius(42.75,37830.0,T,Tᵣ,constants.R) ≈ 49.76935360399572
end;
@testset "arrhenius() with negative effect of too high T" begin
@test PlantBiophysics.arrhenius(A.JMaxRef,A.Eₐⱼ,T,Tᵣ,A.Hdⱼ,A.Δₛⱼ) ≈ 278.5161762418
# NB: value checked using plantecophys.
end;
@testset "arrhenius() with negative effect of too high T" begin
@test PlantBiophysics.arrhenius(A.JMaxRef,A.Eₐⱼ,T,Tᵣ,A.Hdⱼ,A.Δₛⱼ) ≈ 278.5161762418
# NB: value checked using plantecophys.
end;
@testset "compare arrhenius() implementations" begin
# arrhenius with negative effect of too high T should yield the same result as the standard Arrhenius
# when Δₛ = 0.0
@test PlantBiophysics.arrhenius(A.JMaxRef,A.Eₐⱼ,28.0-constants.K₀,A.Tᵣ-constants.K₀,A.Hdⱼ,0.0) ==
PlantBiophysics.arrhenius(A.JMaxRef,A.Eₐⱼ,28.0-constants.K₀,A.Tᵣ-constants.K₀)
end;
|
proofpile-julia0005-42691 | {
"provenance": "014.jsonl.gz:242692"
} | ## Utility functions
# Sum of square differences
sosdiff(a::Number,b::Number) = abs2(a-b)
sosdiff(A,B) = sum(ab -> sosdiff(ab...),zip(A,B))
# Hard thresholding
hard(x, beta) = abs(x) < beta ? zero(x) : x
# Partial objective
_obj(zlk,λ) = sum(z -> (abs(z) < sqrt(2λ) ? abs2(z)/2 : λ), zlk)
# Conversions between filter forms
_filtermatrix(hlist) =
(hcat([vec(h) for h in hlist]...)::Matrix{eltype(first(hlist))},
size(first(hlist)))
_filterlist(Hmatrix,R) = [reshape(h,map(n->n,R)) for h in eachcol(Hmatrix)]
|
proofpile-julia0005-42692 | {
"provenance": "014.jsonl.gz:242693"
} | using SymbolicCodegen
using Test
@testset "SymbolicCodegen.jl" begin
# Write your tests here.
end
|
proofpile-julia0005-42693 | {
"provenance": "014.jsonl.gz:242694"
} | using SFGTools
using Test
using DataFrames
using Random
import Statistics: mean
const SAMPLE_DATA_DIR = joinpath(@__DIR__, "sampledata/")
function listtest()
grab(SAMPLE_DATA_DIR; getall=true)
grab(SAMPLE_DATA_DIR)
df = list_spectra()
@test size(df,1) == 211
df = list_spectra(date=(2020,5,20))
@test size(df,1) == 154
df = list_spectra(inexact="ssp")
@test size(df,1) == 209
df = list_spectra(exact="200518_CaAra_1_13mN_d-pumped_ssp_DLscan")
@test size(df,1) == 150
df = list_spectra(group=true)
@test size(df,1) == 6
end
function loadtest()
spectrum = load_spectra(63725661936614)
@test typeof(spectrum) == Array{SFGTools.SFSpectrum,1}
@test spectrum[1][1] == 778.0
@test_broken load_spectra(63725661936614, format=:tiff) ==
load_spectra(63725661936614, format=:sif)
spectrum = load_spectra(63725661936614)
@show typeof(spectrum)
spectrum
end
function attribute_test(spectrum)
meta = get_metadata(spectrum)
@test typeof(meta) == Dict{String,Any}
attr = get_attribute(spectrum, "ccd_exposure_time")
@test attr == Any[20.0, 20.0, 20.0, 20.0, 20.0]
wn = get_ir_wavenumber(spectrum)
@test typeof(wn) == Array{Float64,1}
end
function fieldcorrection_test()
a = Array{Float64,3}(undef, (4, 1, 2))
a[:,1,1] = [6, 7, 8, 9]
a[:,1,2] = [6, 8, 9, 7]
spectrum = SFSpectrum(0, a)
b = Array{Float64,3}(undef, (4, 1, 2))
b[:,1,1] = [0, 1, 3, 0]
b[:,1,2] = [2, 1, 1, 0]
bias = SFSpectrum(0, b)
d = Array{Float64,3}(undef, (4, 1, 2))
d[:,1,1] = [3, 4, 3, 5]
d[:,1,2] = [4, 2, 1, 5]
dark = SFSpectrum(0, d)
f = Array{Float64,3}(undef, (4, 1, 2))
f[:,1,1] = [2, 3, 4, 3]
f[:,1,2] = [4, 3, 6, 1]
flat = SFSpectrum(0, f)
l = Array{Float64,3}(undef, (4, 1, 2))
l[:,1,1] = [1, 2, 3, 0]
l[:,1,2] = [3, 2, 3, 2]
darkflat = SFSpectrum(0, l)
fieldcorrection!(spectrum, bias=bias, dark=dark, flat=flat, darkflat=darkflat)
@test spectrum[:,1,1] == [5.0, 8.0, 6.0, 8.0]
@test spectrum[:,1,2] == [5.0, 10.0, 7.0, 4.0]
fieldcorrection!(spectrum, dark=dark)
@test spectrum[:,1,1] == [1.5, 5.0, 4.0, 3.0]
@test spectrum[:,1,2] == [1.5, 7.0, 5.0, -1.0]
end
function rm_events_test()
a = rand(MersenneTwister(0), 50)
a[19:20] .*= 5
a[22] *= 40
num_removed_events = rm_events!(a, minstd=2)
@test num_removed_events == 2
end
function average_test()
a = Array{Float64,3}(undef, (4, 1, 2))
a[:,1,1] = [6, 7, 8, 9]
a[:,1,2] = [6, 8, 9, 7]
spectrum1 = SFSpectrum(63725661936614, a)
b = Array{Float64,3}(undef, (4, 1, 3))
b[:,1,1] = [0, 1, 3, 0]
b[:,1,2] = [2, 1, 1, 0]
b[:,1,3] = [4, 1, 2, 0]
spectrum2 = SFSpectrum(63725661936614, b)
spectrum = average(spectrum1)
@test_broken spectrum[:,1,1] == [6.0, 7.5, 8.5, 8.0]
spectrum = average(spectrum1, combine = false)
@test_broken spectrum[:,1,1] == [6, 7, 8, 9]
@test_broken spectrum[:,1,2] == [6, 8, 9, 7]
spectrum = average(spectrum2)
@test_broken spectrum[:,1,1] == [2.0, 1.0, 2.0, 0]
spectrum = average(spectrum2, combine = false)
@test_broken spectrum[:,1,1] == [0, 1, 3, 0]
@test_broken spectrum[:,1,2] == [2, 1, 1, 0]
@test_broken spectrum[:,1,3] == [4, 1, 2, 0]
end
# function save_mat_test(spectra)
# success = false
# try
# save_mat(tempname(), spectra)
# success = true
# catch
# success = false
# end
# @test success == true
# end
function makespectraarray(spectrum::SFSpectrum)
spectra = Array{SFSpectrum,1}(undef, size(spectrum,2))
for i = 1:size(spectrum, 2)
spectra[i] = SFSpectrum(spectrum.id, spectrum[:,i,1])
end
spectra
end
@testset "list_spectra Tests" begin listtest() end
@testset "load_spectra Tests" begin global spectrum = loadtest()[1] end
@testset "Attribute Tests" begin attribute_test(spectrum) end
@testset "Fieldcorrection Tests" begin fieldcorrection_test() end
@testset "Event Removal Tests" begin rm_events_test() end
@testset "average Test" begin average_test() end
# spectra = makespectraarray(spectrum)
# @testset "MAT Saving" begin save_mat_test(spectra) end
proofpile-julia0005-42694 | {
"provenance": "014.jsonl.gz:242695"
using DelimitedFiles

lines = open(readlines, "FileCG.txt")
newlines = sort(lines)
writedlm("FileCG.txt",newlines)
lines = open(readlines, "FileCR.txt")
newlines = sort(lines)
writedlm("FileCR.txt",newlines)
|
proofpile-julia0005-42695 | {
"provenance": "014.jsonl.gz:242696"
} | function _promote(args...)
_type = Base.promote_eltype(args...)
return map(x->convert.(_type, x), args)
end
## Utilities
"""
$(SIGNATURES)
Check if the problem is autonomous, i.e. has no control inputs.
"""
is_autonomous(::AbstractDataDrivenProblem{N, U, C}) where {N,U,C} = U
"""
$(SIGNATURES)
Check if the problem is time discrete.
"""
is_discrete(::AbstractDataDrivenProblem{N, U, C}) where {N,U,C} = C == DDProbType(2)
"""
$(SIGNATURES)
Check if the problem is direct.
"""
is_direct(::AbstractDataDrivenProblem{N, U, C}) where {N,U,C} = C == DDProbType(1)
"""
$(SIGNATURES)
Check if the problem is time continuous.
"""
is_continuous(::AbstractDataDrivenProblem{N, U, C}) where {N,U,C} = C == DDProbType(3)
"""
$(SIGNATURES)
Check if the problem is parameterized.
"""
is_parametrized(x::AbstractDataDrivenProblem{N, U, C}) where {N,U,C} = hasfield(typeof(x), :p) && !isempty(x.p)
"""
$(SIGNATURES)
Check if the problem has associated measurement times.
"""
has_timepoints(x::AbstractDataDrivenProblem{N,U,C}) where {N,U,C} = !isempty(x.t)
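# A short sketch of the trait queries above, using the constructors defined further below
# (the random data is only illustrative):
#
#   prob = DiscreteDataDrivenProblem(rand(2, 100))
#   is_discrete(prob)     # true
#   is_autonomous(prob)   # true, no control signal was supplied
#   is_parametrized(prob) # false, no parameters were supplied
#   has_timepoints(prob)  # true, a default time axis is generated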
## Concrete Type
"""
$(TYPEDEF)
The `DataDrivenProblem` defines a general estimation problem given measurements, inputs and (in the near future) observations.
Two construction methods are available:
+ `DiscreteDataDrivenProblem` for time discrete systems
+ `ContinousDataDrivenProblem` for systems continuous in time
both are aliases for constructing a problem.
# Fields
$(FIELDS)
# Signatures
$(SIGNATURES)
# Example
```julia
X, DX, t = data...
# Define a discrete time problem
prob = DiscreteDataDrivenProblem(X)
# Define a continuous time problem without explicit time points
prob = ContinuousDataDrivenProblem(X, DX)
# Define a continuous time problem without explicit derivatives
prob = ContinuousDataDrivenProblem(X, t)
# Define a discrete time problem with an input function as a function
input_signal(u,p,t) = t^2
prob = DiscreteDataDrivenProblem(X, t, input_signal)
```
"""
struct DataDrivenProblem{dType, cType, probType} <: AbstractDataDrivenProblem{dType, cType, probType}
# Data
"""State measurements"""
X::AbstractMatrix{dType}
"""Time measurements (optional)"""
t::AbstractVector{dType}
"""Differental state measurements (optional); Used for time continuous problems"""
DX::AbstractMatrix{dType}
"""Output measurements (optional); Used for direct problems"""
Y::AbstractMatrix{dType}
"""Input measurements (optional); Used for non-autonoumous problems"""
U::AbstractMatrix{dType}
"""Parameters associated with the problem (optional)"""
p::AbstractVector{dType}
"""Name of the problem"""
name::Symbol
end
function DataDrivenProblem(probType, X, t, DX, Y, U, p; name = gensym(:DDProblem), kwargs...)
dType = Base.promote_eltype(X, t, DX, Y, U, p)
cType = isempty(U)
name = isa(name, Symbol) ? name : Symbol(name)
# We assume a discrete Problem
if isnothing(probType)
probType = DDProbType(2)
if (isempty(DX) && !isempty(Y))
probType = DDProbType(1) # Direct problem
elseif !isempty(DX)
probType = DDProbType(3) # Continuous
end
end
return DataDrivenProblem{dType, cType, probType}(_promote(X,t,DX,Y,U,p)..., name)
end
function DataDrivenProblem(probtype, X, t, DX, Y, U::F, p; kwargs...) where F <: Function
# Generate the input as a Matrix
ts = isempty(t) ? zeros(eltype(X),size(X,2)) : t
u_ = hcat(map(i->U(X[:,i], p, ts[i]), 1:size(X,2))...)
return DataDrivenProblem(probtype, _promote(X,t,DX,Y,u_,p)...; kwargs...)
end
function DataDrivenProblem(X::AbstractMatrix;
t::AbstractVector = collect(one(eltype(X)):size(X,2)),
DX::AbstractMatrix = Array{eltype(X)}(undef, 0, 0),
Y::AbstractMatrix = Array{eltype(X)}(undef, 0,0),
U::F = Array{eltype(X)}(undef, 0,0),
p::AbstractVector = Array{eltype(X)}(undef, 0),
probtype = nothing,
kwargs...) where F <: Union{AbstractMatrix, Function}
return DataDrivenProblem(probtype, X,t,DX,Y,U,p; kwargs...)
end
function Base.summary(io::IO, x::DataDrivenProblem{N,C,P}) where {N,C,P}
print(io, "$P DataDrivenProblem{$N} $(x.name)")
n,m = size(x.X)
print(io, " in $n dimensions and $m samples")
C ? nothing : print(io, " with controls")
return
end
function Base.print(io::IO, x::AbstractDataDrivenProblem{N,C,P}) where {N,C,P}
println(io, "$P DataDrivenProblem{$N} $(x.name)")
println(io, "Summary")
n,m = size(x.X)
println(io, "$m measurements")
println(io, "$n state(s)")
n = size(x.DX, 1)
isempty(x.DX) ? nothing : println(io, "$n differential state(s)")
n = size(x.Y, 1)
isempty(x.Y) ? nothing : println(io, "$n observed variable(s)")
n = size(x.U, 1)
isempty(x.U) ? nothing : println(io, "$n control(s)")
end
Base.show(io::IO, x::DataDrivenProblem{N,C,P}) where {N,C,P} = summary(io, x)
## Discrete Constructors
"""
A time discrete `DataDrivenProblem` usable for problems of the form `f(x[i],p,t,u) ↦ x[i+1]`.
$(SIGNATURES)
"""
function DiscreteDataDrivenProblem(X::AbstractMatrix; kwargs...)
DataDrivenProblem(X; probtype = DDProbType(2), kwargs...)
end
function DiscreteDataDrivenProblem(X::AbstractMatrix, t::AbstractVector; kwargs...)
DataDrivenProblem(X; t=t, probtype = DDProbType(2), kwargs...)
end
function DiscreteDataDrivenProblem(X::AbstractMatrix, t::AbstractVector, U::AbstractMatrix; kwargs...)
return DataDrivenProblem(X; t=t, U = U , probtype = DDProbType(2), kwargs...)
end
function DiscreteDataDrivenProblem(X::AbstractMatrix, t::AbstractVector, U::Function; kwargs...)
return DataDrivenProblem(X; t=t, U = U , probtype = DDProbType(2), kwargs...)
end
## Continouos Constructors
"""
A time continuous `DataDrivenProblem` usable for problems of the form `f(x,p,t,u) ↦ dx/dt`.
$(SIGNATURES)
Automatically constructs derivatives via an additional collocation method, which can be either a collocation
kernel or an interpolation from `DataInterpolations.jl` wrapped in an `InterpolationMethod`.
"""
function ContinuousDataDrivenProblem(X::AbstractMatrix, DX::AbstractMatrix; kwargs...)
return DataDrivenProblem(X; DX = DX, probtype = DDProbType(3), kwargs...)
end
function ContinuousDataDrivenProblem(X::AbstractMatrix, t::AbstractVector, DX::AbstractMatrix; kwargs...)
return DataDrivenProblem(X; t = t, DX = DX, probtype = DDProbType(3), kwargs...)
end
function ContinuousDataDrivenProblem(X::AbstractMatrix, t::AbstractVector, DX::AbstractMatrix, U::AbstractMatrix; kwargs...)
return DataDrivenProblem(X; t = t, DX = DX, U = U, probtype = DDProbType(3), kwargs...)
end
function ContinuousDataDrivenProblem(X::AbstractMatrix, t::AbstractVector, DX::AbstractMatrix, U::F; kwargs...) where {F <: Function}
return DataDrivenProblem(X; t = t, DX = DX, U = U, probtype = DDProbType(3), kwargs...)
end
function ContinuousDataDrivenProblem(X::AbstractMatrix, t::AbstractVector, collocation = InterpolationMethod(); kwargs...)
dx, x = collocate_data(X, t, collocation)
return DataDrivenProblem(x; t = t, DX = dx, probtype = DDProbType(3), kwargs...)
end
function ContinuousDataDrivenProblem(X::AbstractMatrix, t::AbstractVector, U::AbstractMatrix, collocation; kwargs...)
dx, x = collocate_data(X, t, collocation)
return DataDrivenProblem(x; t = t, DX = dx, U = U, probtype = DDProbType(3), kwargs...)
end
## Direct Constructors
"""
A direct `DataDrivenProblem` usable for problems of the form `f(x,p,t,u) ↦ y`.
$(SIGNATURES)
"""
function DirectDataDrivenProblem(X::AbstractMatrix, Y::AbstractMatrix; kwargs...)
return DataDrivenProblem(X; Y = Y, probtype = DDProbType(1), kwargs...)
end
function DirectDataDrivenProblem(X::AbstractMatrix, t::AbstractVector, Y::AbstractMatrix; kwargs...)
return DataDrivenProblem(X; t = t, Y = Y, probtype = DDProbType(1), kwargs...)
end
function DirectDataDrivenProblem(X::AbstractMatrix, t::AbstractVector, Y::AbstractMatrix, U; kwargs...)
return DataDrivenProblem(X; t = t, Y = Y, U = U, probtype = DDProbType(1),kwargs...)
end
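# Sketch of a direct problem (illustrative data): state measurements X are mapped to observations Y.
#
#   X = rand(3, 50); Y = rand(2, 50)
#   prob = DirectDataDrivenProblem(X, Y)
#   is_direct(prob) # true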
Base.length(prob::AbstractDataDrivenProblem) = size(prob.X, 2)
# Special case of Discrete
Base.length(prob::AbstractDiscreteProb) = size(prob.X, 2)-1
Base.size(prob::AbstractDataDrivenProblem) = size(prob.X)
Base.size(prob::AbstractDiscreteProb) = begin
n,m = size(prob.X)
return (n, m-1)
end
"""
$(SIGNATURES)
Returns the name of the `problem`.
"""
get_name(p::AbstractDataDrivenProblem) = getfield(p, :name)
## Utils
function ModelingToolkit.states(p::AbstractDataDrivenProblem, i = :, j = :)
x = getfield(p, :X)
isempty(x) ? x : getindex(x, i, j)
end
function ModelingToolkit.parameters(p::AbstractDataDrivenProblem, i = :)
x = getfield(p, :p)
isempty(x) ? x : getindex(x, i)
end
function ModelingToolkit.independent_variable(p::AbstractDataDrivenProblem, i = :)
x = getfield(p, :t)
isempty(x) ? x : getindex(x, i)
end
function ModelingToolkit.get_dvs(p::AbstracContProb, i = :, j = :)
x = getfield(p, :DX)
isempty(x) ? x : getindex(x, i, j)
end
function ModelingToolkit.get_dvs(p::AbstractDirectProb, i = :, j = :)
x = getfield(p, :Y)
isempty(x) ? x : getindex(x, i, j)
end
function ModelingToolkit.get_dvs(p::AbstractDiscreteProb, i = :, j = :)
ModelingToolkit.states(p, i, j)
end
function ModelingToolkit.observed(p::AbstractDataDrivenProblem, i = :, j = :)
x = getfield(p, :Y)
isempty(x) ? x : getindex(x, i, j)
end
function ModelingToolkit.controls(p::AbstractDataDrivenProblem, i = :, j = :)
x = getfield(p, :U)
isempty(x) ? x : getindex(x, i, j)
end
function Base.getindex(p::AbstractDataDrivenProblem, i = :, j = :)
return (
ModelingToolkit.states(p, i, j),
ModelingToolkit.parameters(p),
ModelingToolkit.independent_variable(p, j),
ModelingToolkit.controls(p, i, j)
)
end
# TODO This is just for explicit basis!
# Make the basis callable with the problem
# Explicit
@views (b::AbstractBasis)(p::AbstractDataDrivenProblem) = begin
b(states(p), parameters(p), independent_variable(p), controls(p))
end
@views (b::AbstractBasis)(dx::AbstractMatrix, p::AbstractDataDrivenProblem) = begin
b(dx, states(p), parameters(p), independent_variable(p), controls(p))
end
@views (b::AbstractBasis)(p::AbstractDataDrivenProblem, j) = begin
b(p[:, j]...)
end
@views (b::AbstractBasis)(dx::AbstractMatrix, p::AbstractDataDrivenProblem, j) = begin
b(dx, p[:, j]...)
end
# Special case discrete problem
@views (b::AbstractBasis)(p::AbstractDiscreteProb) = begin
b(p[:, 1:length(p)]...)
end
@views (b::AbstractBasis)(dx::AbstractMatrix, p::AbstractDiscreteProb) = begin
b(dx, p, 1:length(p))
end
# Check for nans, inf etc
check_domain(x) = @assert all(.~isnan.(x)) && all(.~isinf.(x)) ("One or more measurements contain `NaN` or `Inf`.")
check_lengths(args...) = @assert all(map(x->size(x)[end], args) .== size(first(args))[end]) "One or more measurements are not sized equally."
# Return the target variables
get_target(x::AbstractDirectProb{N,C}) where {N,C} = x.Y
get_target(x::AbstractDiscreteProb{N,C}) where {N,C} = x.X[:,2:end]
get_target(x::AbstracContProb{N,C}) where {N,C} = x.DX
get_oop_args(x::AbstractDataDrivenProblem{N,C,P}) where {N <: Number, C, P} = map(f->getfield(x, f), (:X, :p, :t, :U))
function get_oop_args(x::AbstractDiscreteProb{N,C}) where {N <: Number, C}
return (
x.X[:, 1:end-1],
x.p,
x.t[1:end-1],
x.U[:, 1:end-1]
)
end
function get_implicit_oop_args(x::AbstractDirectProb{N,C}) where {N <: Number, C}
return (
[x.X; x.Y],
x.p,
x.t,
x.U
)
end
function get_implicit_oop_args(x::AbstracContProb{N,C}) where {N <: Number, C}
return (
[x.X; x.DX],
x.p,
x.t,
x.U
)
end
function get_implicit_oop_args(x::AbstractDiscreteProb{N,C}) where {N <: Number, C}
return (
[x.X[:, 1:end-1]; x.X[:, 2:end]],
x.p,
x.t[1:end-1],
x.U[:, 1:end-1]
)
end
"""
$(SIGNATURES)
Checks if a `DataDrivenProblem` is valid by asserting that the data contains no `NaN` or `Inf` values and
that the number of measurements is consistent.
# Example
```julia
is_valid(problem)
```
"""
function is_valid(x::AbstractDirectProb{N,C}) where {N <: Number,C}
map(check_domain, (x.X, x.Y, x.U, x.t, x.p))
if !C && !isempty(x.t)
check_lengths(x.X, x.Y, x.U, x.t)
elseif !C
check_lengths(x.X, x.Y, x.U)
elseif C && !isempty(x.t)
check_lengths(x.X, x.Y, x.t)
else
check_lengths(x.X, x.Y)
end
return true
end
function is_valid(x::AbstractDiscreteProb{N,C}) where {N <: Number,C}
map(check_domain, (x.X, x.U, x.t, x.p))
if !C && !isempty(x.t)
check_lengths(x.X, x.U, x.t)
elseif !C
check_lengths(x.X, x.U)
elseif C && !isempty(x.t)
check_lengths(x.X, x.t)
else
check_lengths(x.X)
end
return true
end
function is_valid(x::AbstracContProb{N,C}) where {N <: Number,C}
map(check_domain, (x.X, x.DX, x.U, x.t, x.p))
if !C && !isempty(x.t)
check_lengths(x.X, x.DX, x.U, x.t)
elseif !C
check_lengths(x.X, x.DX, x.U)
elseif C && !isempty(x.t)
check_lengths(x.X, x.DX, x.t)
else
check_lengths(x.X, x.DX)
end
return true
end
"""
$(SIGNATURES)
Asserts that the given combination of `problem` and `basis` - and optionally a target matrix `dx` for in-place evaluation - is
valid in its dimensions. Should only be used for checking explicit evaluation. Can also be used as a shorthand to
```julia
@is_applicable problem # Checks if the problem is well posed in terms of dimensions
@is_applicable problem basis # Checks if the basis can be called with the problem out of place
@is_applicable problem basis dx # Checks if the basis can be called with the problem and matrix dx in place
```
"""
macro is_applicable(problem)
return :(@assert is_valid($(esc(problem))))
end
macro is_applicable(problem, basis)
return quote
if isa($(esc(problem)), AbstractDirectProb)
@assert length(states($(esc(basis)))) == size(observed($(esc(problem))),1) "Problem and basis need to have same observed size"
else
@assert length(states($(esc(basis)))) == size(states($(esc(problem))),1) "Problem and basis need to have same state size"
end
@assert length(controls($(esc(basis)))) == size(controls($(esc(problem))),1) "Problem and basis need to have same control size"
@assert length(parameters($(esc(basis)))) <= length(parameters($(esc(problem)))) "Problem and basis need to have consistent parameter size"
end
end
macro is_applicable(problem, basis, dx)
return quote
@is_applicable $(esc(problem)) $(esc(basis))
lp = length($(esc(problem)))
n, m = size($(esc(dx)))
lb = length($(esc(basis)))
if isa($(esc(problem)), AbstractDiscreteProb)
@assert n == lb && m == lp-1 "Target array has to be of size ($lb, $lp)"
else
@assert n == lb && m == lp "Target array has to be of size ($lb, $lp)"
end
end
end
## DESolution dispatch
ContinuousDataDrivenProblem(sol::T; kwargs...) where T <: DiffEqBase.DESolution = DataDrivenProblem(sol; kwargs...)
DiscreteDataDrivenProblem(sol::T; kwargs...) where T <: DiffEqBase.DESolution = DataDrivenProblem(sol; kwargs...)
function DataDrivenProblem(sol::T; use_interpolation = false, kwargs...) where T <: DiffEqBase.DESolution
if sol.retcode != :Success
throw(AssertionError("The solution is not successful. Abort."))
return
end
X = Array(sol)
t = sol.t
p = sol.prob.p
p = isa(p, DiffEqBase.NullParameters) ? eltype(X)[] : p
if isdiscrete(sol.alg)
return DiscreteDataDrivenProblem(
X, t; p = p, kwargs...
)
else
if use_interpolation
DX = Array(sol(sol.t, Val{1}))
else
DX = similar(X)
if DiffEqBase.isinplace(sol.prob.f)
@views map(i->sol.prob.f(DX[:, i], X[:, i], p, t[i]), 1:size(X,2))
else
map(i->DX[:, i] .= sol.prob.f(X[:, i], p, t[i]), 1:size(X,2))
end
end
return ContinuousDataDrivenProblem(
X, t; DX = DX, p = p, kwargs...
)
end
end
|
proofpile-julia0005-42696 | {
"provenance": "014.jsonl.gz:242697"
} | # Autogenerated wrapper script for alsa_plugins_jll for armv7l-linux-musleabihf
export libasound_module_conf_pulse, libasound_module_ctl_arcam_av, libasound_module_ctl_oss, libasound_module_ctl_pulse, libasound_module_pcm_a52, libasound_module_pcm_oss, libasound_module_pcm_pulse, libasound_module_pcm_speex, libasound_module_pcm_upmix, libasound_module_pcm_usb_stream, libasound_module_pcm_vdownmix, libasound_module_rate_lavrate, libasound_module_rate_samplerate, libasound_module_rate_speexrate
using FFMPEG_jll
using alsa_jll
using libsamplerate_jll
using PulseAudio_jll
JLLWrappers.@generate_wrapper_header("alsa_plugins")
JLLWrappers.@declare_library_product(libasound_module_conf_pulse, "libasound_module_conf_pulse.so")
JLLWrappers.@declare_library_product(libasound_module_ctl_arcam_av, "libasound_module_ctl_arcam_av.so")
JLLWrappers.@declare_library_product(libasound_module_ctl_oss, "libasound_module_ctl_oss.so")
JLLWrappers.@declare_library_product(libasound_module_ctl_pulse, "libasound_module_ctl_pulse.so")
JLLWrappers.@declare_library_product(libasound_module_pcm_a52, "libasound_module_pcm_a52.so")
JLLWrappers.@declare_library_product(libasound_module_pcm_oss, "libasound_module_pcm_oss.so")
JLLWrappers.@declare_library_product(libasound_module_pcm_pulse, "libasound_module_pcm_pulse.so")
JLLWrappers.@declare_library_product(libasound_module_pcm_speex, "libasound_module_pcm_speex.so")
JLLWrappers.@declare_library_product(libasound_module_pcm_upmix, "libasound_module_pcm_upmix.so")
JLLWrappers.@declare_library_product(libasound_module_pcm_usb_stream, "libasound_module_pcm_usb_stream.so")
JLLWrappers.@declare_library_product(libasound_module_pcm_vdownmix, "libasound_module_pcm_vdownmix.so")
JLLWrappers.@declare_library_product(libasound_module_rate_lavrate, "libasound_module_rate_lavrate.so")
JLLWrappers.@declare_library_product(libasound_module_rate_samplerate, "libasound_module_rate_samplerate.so")
JLLWrappers.@declare_library_product(libasound_module_rate_speexrate, "libasound_module_rate_speexrate.so")
function __init__()
JLLWrappers.@generate_init_header(FFMPEG_jll, alsa_jll, libsamplerate_jll, PulseAudio_jll)
JLLWrappers.@init_library_product(
libasound_module_conf_pulse,
"lib/alsa-lib/libasound_module_conf_pulse.so",
RTLD_LAZY | RTLD_DEEPBIND,
)
JLLWrappers.@init_library_product(
libasound_module_ctl_arcam_av,
"lib/alsa-lib/libasound_module_ctl_arcam_av.so",
RTLD_LAZY | RTLD_DEEPBIND,
)
JLLWrappers.@init_library_product(
libasound_module_ctl_oss,
"lib/alsa-lib/libasound_module_ctl_oss.so",
RTLD_LAZY | RTLD_DEEPBIND,
)
JLLWrappers.@init_library_product(
libasound_module_ctl_pulse,
"lib/alsa-lib/libasound_module_ctl_pulse.so",
RTLD_LAZY | RTLD_DEEPBIND,
)
JLLWrappers.@init_library_product(
libasound_module_pcm_a52,
"lib/alsa-lib/libasound_module_pcm_a52.so",
RTLD_LAZY | RTLD_DEEPBIND,
)
JLLWrappers.@init_library_product(
libasound_module_pcm_oss,
"lib/alsa-lib/libasound_module_pcm_oss.so",
RTLD_LAZY | RTLD_DEEPBIND,
)
JLLWrappers.@init_library_product(
libasound_module_pcm_pulse,
"lib/alsa-lib/libasound_module_pcm_pulse.so",
RTLD_LAZY | RTLD_DEEPBIND,
)
JLLWrappers.@init_library_product(
libasound_module_pcm_speex,
"lib/alsa-lib/libasound_module_pcm_speex.so",
RTLD_LAZY | RTLD_DEEPBIND,
)
JLLWrappers.@init_library_product(
libasound_module_pcm_upmix,
"lib/alsa-lib/libasound_module_pcm_upmix.so",
RTLD_LAZY | RTLD_DEEPBIND,
)
JLLWrappers.@init_library_product(
libasound_module_pcm_usb_stream,
"lib/alsa-lib/libasound_module_pcm_usb_stream.so",
RTLD_LAZY | RTLD_DEEPBIND,
)
JLLWrappers.@init_library_product(
libasound_module_pcm_vdownmix,
"lib/alsa-lib/libasound_module_pcm_vdownmix.so",
RTLD_LAZY | RTLD_DEEPBIND,
)
JLLWrappers.@init_library_product(
libasound_module_rate_lavrate,
"lib/alsa-lib/libasound_module_rate_lavrate.so",
RTLD_LAZY | RTLD_DEEPBIND,
)
JLLWrappers.@init_library_product(
libasound_module_rate_samplerate,
"lib/alsa-lib/libasound_module_rate_samplerate.so",
RTLD_LAZY | RTLD_DEEPBIND,
)
JLLWrappers.@init_library_product(
libasound_module_rate_speexrate,
"lib/alsa-lib/libasound_module_rate_speexrate.so",
RTLD_LAZY | RTLD_DEEPBIND,
)
JLLWrappers.@generate_init_footer()
end # __init__()
|
proofpile-julia0005-42697 | {
"provenance": "014.jsonl.gz:242698"
} | #type CompositeMap <: Map
# header::MapHeader
# a::Map
# b::Map
# function CompositeMap(a::Map, b::Map)
# codomain(a) == domain(b) || throw("maps not compatible")
# r = new()
# r.a = a
# r.b = b
# image = function(M::Map, a::Any)
# return M.a(M.b(a))
# end
# preim = function(M::Map, a::Any)
# return preimage(M.b, preimage(M.a, a))
# end
# r.header = MapHeader(a.header.domain, b.header.codomain, image, preim)
# return r
# end
#end
#
#function show(io::IO, M::CompositeMap)
# println(io, "composite of")
# println(io, " ", M.a)
# println(io, "*")
# println(io, " ", M.b)
#end
#
#function *(a::Map, b::Map)
# return CompositeMap(a,b)
#end
#
|
proofpile-julia0005-42698 | {
"provenance": "014.jsonl.gz:242699"
} | using PrimitiveOneHot
using PrimitiveOneHot: OneHot, onehotsize
# old utility
onehot2indices(xs::OneHotArray) = reinterpret(Int32, xs.onehots)
indices2onehot(nums::Int, xs::AbstractArray{Int}) = OneHotArray(nums, xs)
tofloat(::Type{F}, o::OneHotArray{N, <:AbstractArray}) where {F<:AbstractFloat,N} = Array{F}(o)
# AD
using Flux
Flux.@nograd onehot2indices
Flux.@nograd indices2onehot
Flux.@nograd tofloat
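# A commented round-trip sketch for the helpers above; the number of categories
# (10) and the index vector are illustrative values only.
# oh    = indices2onehot(10, [1, 3, 5])   # 10×3 one-hot array
# inds  = onehot2indices(oh)              # recovers the indices (as Int32)
# dense = tofloat(Float32, oh)            # dense Float32 array of zeros and ones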
|
proofpile-julia0005-42699 | {
"provenance": "014.jsonl.gz:242700"
} | abstract type AbstractMatrixOperator{Tconv} end
@params struct MatrixOperator{Tconv} <: AbstractMatrixOperator{Tconv}
K::Any
f::Any
conv::Tconv
end
function Base.show(::IO, ::MIME{Symbol("text/plain")}, ::MatrixOperator)
return println("TopOpt matrix linear operator")
end
LinearAlgebra.mul!(c, op::MatrixOperator, b) = mul!(c, op.K, b)
Base.size(op::MatrixOperator, i...) = size(op.K, i...)
Base.eltype(op::MatrixOperator) = eltype(op.K)
LinearAlgebra.:*(op::MatrixOperator, b) = mul!(similar(b), op.K, b)
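# A commented sketch of wrapping an assembled matrix; it assumes the positional
# constructor generated by `@params`, and the values are illustrative only.
# K = [2.0 0.0; 0.0 3.0]; f = [1.0, 1.0]
# op = MatrixOperator(K, f, nothing)
# op * [1.0, 2.0]          # == K * [1.0, 2.0]
# size(op), eltype(op)     # ((2, 2), Float64)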
@params struct MatrixFreeOperator{Tconv,T,dim} <: AbstractMatrixOperator{Tconv}
f::AbstractVector{T}
elementinfo::ElementFEAInfo{dim,T}
meandiag::T
vars::AbstractVector{T}
xes::Any
fixed_dofs::Any
free_dofs::Any
xmin::T
penalty::Any
conv::Tconv
end
function Base.show(::IO, ::MIME{Symbol("text/plain")}, ::MatrixFreeOperator)
return println("TopOpt matrix-free linear operator")
end
Base.size(op::MatrixFreeOperator) = (size(op, 1), size(op, 2))
Base.size(op::MatrixFreeOperator, i) = 1 <= i <= 2 ? length(op.elementinfo.fixedload) : 1
Base.eltype(op::MatrixFreeOperator{<:Any,T}) where {T} = T
import LinearAlgebra: *, mul!
function *(A::MatrixFreeOperator, x)
y = similar(x)
mul!(y, A::MatrixFreeOperator, x)
return y
end
function mul!(y::TV, A::MatrixFreeOperator, x::TV) where {TV<:AbstractVector}
T = eltype(y)
nels = length(A.elementinfo.Kes)
ndofs = length(A.elementinfo.fixedload)
dofspercell = size(A.elementinfo.Kes[1], 1)
meandiag = A.meandiag
@unpack Kes, metadata, black, white, varind = A.elementinfo
@unpack cell_dofs, dof_cells = metadata
@unpack penalty, xmin, vars, fixed_dofs, free_dofs, xes = A
for i in 1:nels
if PENALTY_BEFORE_INTERPOLATION
px = ifelse(
black[i],
one(T),
ifelse(white[i], xmin, density(penalty(vars[varind[i]]), xmin)),
)
else
px = ifelse(
black[i],
one(T),
ifelse(white[i], xmin, penalty(density(vars[varind[i]], xmin))),
)
end
xe = xes[i]
for j in 1:dofspercell
xe = @set xe[j] = x[cell_dofs[j, i]]
end
if eltype(Kes) <: Symmetric
xes[i] = px * (bcmatrix(Kes[i]).data * xe)
else
xes[i] = px * (bcmatrix(Kes[i]) * xe)
end
end
for i in 1:length(fixed_dofs)
dof = fixed_dofs[i]
y[dof] = meandiag * x[dof]
end
for i in 1:length(free_dofs)
dof = free_dofs[i]
yi = zero(T)
r = dof_cells.offsets[dof]:(dof_cells.offsets[dof + 1] - 1)
for ind in r
k, m = dof_cells.values[ind]
yi += xes[k][m]
end
y[dof] = yi
end
return y
end
|
proofpile-julia0005-42700 | {
"provenance": "014.jsonl.gz:242701"
} | using ModelingToolkit, Unitful, OrdinaryDiffEq, DiffEqJump, IfElse
using Test
MT = ModelingToolkit
@parameters τ [unit = u"ms"] γ
@variables t [unit = u"ms"] E(t) [unit = u"kJ"] P(t) [unit = u"MW"]
D = Differential(t)
#This is how equivalent works:
@test MT.equivalent(u"MW" ,u"kJ/ms")
@test !MT.equivalent(u"m", u"cm")
@test MT.equivalent(MT.get_unit(P^γ), MT.get_unit((E/τ)^γ))
# Basic access
@test MT.get_unit(t) == u"ms"
@test MT.get_unit(E) == u"kJ"
@test MT.get_unit(τ) == u"ms"
@test MT.get_unit(γ) == MT.unitless
@test MT.get_unit(0.5) == MT.unitless
@test MT.get_unit(MT.SciMLBase.NullParameters) == MT.unitless
# Prohibited unit types
@parameters β [unit = u"°"] α [unit = u"°C"] γ [unit = 1u"s"]
@test_throws MT.ValidationError MT.get_unit(β)
@test_throws MT.ValidationError MT.get_unit(α)
@test_throws MT.ValidationError MT.get_unit(γ)
# Non-trivial equivalence & operators
@test MT.get_unit(τ^-1) == u"ms^-1"
@test MT.equivalent(MT.get_unit(D(E)),u"MW")
@test MT.equivalent(MT.get_unit(E/τ), u"MW")
@test MT.get_unit(2*P) == u"MW"
@test MT.get_unit(t/τ) == MT.unitless
@test MT.equivalent(MT.get_unit(P - E/τ),u"MW")
@test MT.equivalent(MT.get_unit(D(D(E))),u"MW/ms")
@test MT.get_unit(IfElse.ifelse(t>t,P,E/τ)) == u"MW"
@test MT.get_unit(1.0^(t/τ)) == MT.unitless
@test MT.get_unit(exp(t/τ)) == MT.unitless
@test MT.get_unit(sin(t/τ)) == MT.unitless
@test MT.get_unit(sin(1u"rad")) == MT.unitless
@test MT.get_unit(t^2) == u"ms^2"
eqs = [D(E) ~ P - E/τ
0 ~ P]
@test MT.validate(eqs)
@named sys = ODESystem(eqs)
@test !MT.validate(D(D(E)) ~ P)
@test !MT.validate(0 ~ P + E*τ)
# Array variables
@variables t [unit = u"s"] x[1:3](t) [unit = u"m"]
@parameters v[1:3] = [1,2,3] [unit = u"m/s"]
D = Differential(t)
eqs = D.(x) .~ v
ODESystem(eqs,name=:sys)
# Difference equation
@parameters t [unit = u"s"] a [unit = u"s"^-1]
@variables x(t) [unit = u"kg"]
δ = Differential(t)
D = Difference(t; dt = 0.1u"s")
eqs = [
δ(x) ~ a*x
]
de = ODESystem(eqs, t, [x], [a],name=:sys)
# Nonlinear system
@parameters a [unit = u"kg"^-1]
@variables x [unit = u"kg"]
eqs = [
0 ~ a*x
]
@named nls = NonlinearSystem(eqs, [x], [a])
# SDE test w/ noise vector
@parameters τ [unit = u"ms"] Q [unit = u"MW"]
@variables t [unit = u"ms"] E(t) [unit = u"kJ"] P(t) [unit = u"MW"]
D = Differential(t)
eqs = [D(E) ~ P - E/τ
P ~ Q]
noiseeqs = [0.1u"MW",
0.1u"MW"]
@named sys = SDESystem(eqs, noiseeqs, t, [P, E], [τ, Q])
# With noise matrix
noiseeqs = [0.1u"MW" 0.1u"MW"
0.1u"MW" 0.1u"MW"]
@named sys = SDESystem(eqs,noiseeqs, t, [P, E], [τ, Q])
# Invalid noise matrix
noiseeqs = [0.1u"MW" 0.1u"MW"
0.1u"MW" 0.1u"s"]
@test !MT.validate(eqs,noiseeqs)
# Non-trivial simplifications
@variables t [unit = u"s"] V(t) [unit = u"m"^3] L(t) [unit = u"m"]
@parameters v [unit = u"m/s"] r [unit =u"m"^3/u"s"]
D = Differential(t)
eqs = [D(L) ~ v,
V ~ L^3]
@named sys = ODESystem(eqs)
sys_simple = structural_simplify(sys)
eqs = [D(V) ~ r,
V ~ L^3]
@named sys = ODESystem(eqs)
sys_simple = structural_simplify(sys)
@variables V [unit = u"m"^3] L [unit = u"m"]
@parameters v [unit = u"m/s"] r [unit = u"m"^3/u"s"] t [unit = u"s"]
eqs = [V ~ r*t,
V ~ L^3]
@named sys = NonlinearSystem(eqs, [V, L], [t, r])
sys_simple = structural_simplify(sys)
eqs = [L ~ v*t,
V ~ L^3]
@named sys = NonlinearSystem(eqs, [V,L], [t,r])
sys_simple = structural_simplify(sys)
#Jump System
@parameters β [unit = u"(mol^2*s)^-1"] γ [unit = u"(mol*s)^-1"] t [unit = u"s"] jumpmol [unit = u"mol"]
@variables S(t) [unit = u"mol"] I(t) [unit = u"mol"] R(t) [unit = u"mol"]
rate₁ = β*S*I
affect₁ = [S ~ S - 1*jumpmol, I ~ I + 1*jumpmol]
rate₂ = γ*I
affect₂ = [I ~ I - 1*jumpmol, R ~ R + 1*jumpmol]
j₁ = ConstantRateJump(rate₁, affect₁)
j₂ = VariableRateJump(rate₂, affect₂)
js = JumpSystem([j₁, j₂], t, [S, I, R], [β, γ],name=:sys)
affect_wrong = [S ~ S - jumpmol, I ~ I + 1]
j_wrong = ConstantRateJump(rate₁, affect_wrong)
@test_throws MT.ValidationError JumpSystem([j_wrong, j₂], t, [S, I, R], [β, γ],name=:sys)
rate_wrong = γ^2*I
j_wrong = ConstantRateJump(rate_wrong, affect₂)
@test_throws MT.ValidationError JumpSystem([j₁, j_wrong], t, [S, I, R], [β, γ],name=:sys)
# mass action jump tests for SIR model
maj1 = MassActionJump(2*β/2, [S => 1, I => 1], [S => -1, I => 1])
maj2 = MassActionJump(γ, [I => 1], [I => -1, R => 1])
@named js3 = JumpSystem([maj1, maj2], t, [S,I,R], [β,γ])
#Test unusual jump system
@parameters β γ t
@variables S(t) I(t) R(t)
maj1 = MassActionJump(2.0, [0 => 1], [S => 1])
maj2 = MassActionJump(γ, [S => 1], [S => -1])
@named js4 = JumpSystem([maj1, maj2], t, [S], [β, γ])
|
proofpile-julia0005-42701 | {
"provenance": "014.jsonl.gz:242702"
} |
"""
Set up a multistart least-squares solver for the phase parameters β. An example of `optim_algorithm` is `NLopt.LN_BOBYQA`.
"""
function setupβLSsolverMultistartoptim(optim_algorithm,
    Es::Vector{NMRSpectraSimulator.κCompoundFIDType{T, SST}},
    Bs,
    As::Vector{NMRSpectraSimulator.CompoundFIDType{T, SST}},
LS_inds,
U_rad_cost,
y_cost::Vector{Complex{T}},
κs_β_DOFs, κs_β_orderings;
κ_lb_default = 0.2,
κ_ub_default = 5.0,
max_iters = 50,
xtol_rel = 1e-9,
ftol_rel = 1e-9,
maxtime = Inf,
N_starts = 100,
inner_xtol_rel = 1e-12,
inner_maxeval = 100,
inner_maxtime = Inf) where {T,SST}
#
N_β = sum( getNβ(κs_β_DOFs[n], Bs[n]) for n = 1:length(Bs) )
β_lb = ones(T, N_β) .* (-π)
β_ub = ones(T, N_β) .* (π)
#β_initial = zeros(T, N_β)
p_lb = β_lb
p_ub = β_ub
q, updateβfunc, updateκfunc, E_BLS, κ_BLS, b_BLS,
getβfunc = setupcostβLS(Es, Bs, As, LS_inds, U_rad_cost, y_cost,
κs_β_DOFs,
κs_β_orderings;
κ_lb_default = κ_lb_default,
κ_ub_default = κ_ub_default)
f = pp->costβLS(U_rad_cost, y_cost, updateβfunc, updateκfunc, pp, Es,
E_BLS, κ_BLS, b_BLS, q)
P = MultistartOptimization.MinimizationProblem(f,
p_lb, p_ub)
#
local_method = MultistartOptimization.NLoptLocalMethod(; algorithm = optim_algorithm,
xtol_rel = inner_xtol_rel,
maxeval = inner_maxeval,
maxtime = inner_maxtime)
multistart_method = MultistartOptimization.TikTak(N_starts)
run_optim = pp->MultistartOptimization.multistart_minimization(multistart_method,
local_method, P)
return run_optim, f, E_BLS, κ_BLS, b_BLS, updateβfunc, q
end
function setupβLSsolver(optim_algorithm,
Es,
Bs,
As,
LS_inds,
U_rad_cost,
y_cost::Vector{Complex{T}},
κs_β_DOFs::Vector{Vector{Int}},
κs_β_orderings::Vector{Vector{Vector{Int}}};
κ_lb_default = 0.2,
κ_ub_default = 5.0,
max_iters = 50,
xtol_rel = 1e-9,
ftol_rel = 1e-9,
maxtime = Inf) where T
#N_β = sum( getNβ(κs_β_DOFs[n], Bs[n]) for n = 1:length(Bs) )
N_β = sum( getNβ(Bs[n]) for n = 1:length(Bs) )
β_lb = ones(T, N_β) .* (-π)
β_ub = ones(T, N_β) .* (π)
#β_initial = zeros(T, N_β)
p_lb = β_lb
p_ub = β_ub
q, updateβfunc, updateκfunc, E_BLS, κ_BLS, b_BLS,
getβfunc = setupcostβLS(Es, Bs, As, LS_inds, U_rad_cost, y_cost,
κs_β_DOFs, κs_β_orderings;
κ_lb_default = κ_lb_default,
κ_ub_default = κ_ub_default)
# q is simulated spectra. f is cost function.
f = pp->costβLS(U_rad_cost, y_cost, updateβfunc, updateκfunc, pp, Es,
E_BLS, κ_BLS, b_BLS, q)
df = xx->FiniteDiff.finite_difference_gradient(f, xx)
opt = NLopt.Opt(optim_algorithm, N_β)
run_optim = pp->runNLopt!(opt,
pp,
f,
df,
p_lb,
p_ub;
max_iters = max_iters,
xtol_rel = xtol_rel,
ftol_rel = ftol_rel,
maxtime = maxtime)
return run_optim, f, E_BLS, κ_BLS, b_BLS, updateβfunc, updateκfunc, q
end
"""
κ version: set up the β cost function, with the κ amplitudes fitted by box-constrained least squares at each evaluation.
"""
function setupcostβLS(Es::Vector{NMRSpectraSimulator.καFIDModelType{T, SST}},
Bs,
As,
LS_inds,
U0_rad,
y0::Vector{Complex{T}},
κs_β_DOFs,
κs_β_orderings;
κ_lb_default = 0.2,
κ_ub_default = 5.0) where {T <: Real, SST}
w = ones(T, length(Es))
#N_β = sum( getNβ(κs_β_DOFs[n], Bs[n]) for n = 1:length(Bs) )
N_β = sum( getNβ(Bs[n]) for n = 1:length(Bs) )
st_ind_β = 1
fin_ind_β = st_ind_β + N_β - 1
#updateβfunc = pp->updateβ!(Bs, κs_β_orderings, κs_β_DOFs, pp, st_ind_β)
updateβfunc = pp->updateβ!(Bs, pp, st_ind_β)
f = uu->NMRSpectraSimulator.evalitpproxymixture(uu, Bs, Es; w = w)
### LS κ.
U_rad_LS = U0_rad[LS_inds]
N_κ, N_κ_singlets = countκ(Es)
N_κ_vars = N_κ + N_κ_singlets
E_BLS, κ_BLS, b_BLS = setupupdateLS(length(U_rad_LS), N_κ_vars, y0[LS_inds])
κ_lb = ones(N_κ_vars) .* κ_lb_default
κ_ub = ones(N_κ_vars) .* κ_ub_default
updateκfunc = xx->updateκ2!(E_BLS, b_BLS, κ_BLS,
U_rad_LS, Es, As, w, κ_lb, κ_ub)
#### extract parameters from p.
getβfunc = pp->pp[st_ind_β:fin_ind_β]
return f, updateβfunc, updateκfunc, E_BLS, κ_BLS, b_BLS, getβfunc
end
function costβLS(U_rad,
S_U::Vector{Complex{T}},
updateβfunc, updateκfunc,
p::Vector{T}, Es,
E_BLS::Matrix{Complex{T}}, κ_BLS::Vector{T}, b_BLS,
f)::T where T <: Real
updateβfunc(p)
# try updateκfunc(p)
# catch e
# println("error in updateκfunc(p)")
# bt = catch_backtrace()
# showerror(stdout, e, bt)
# rethrow(e)
# end
updateκfunc(p)
parseκ!(Es, κ_BLS)
#println("all(isfinite.(Es)) = ", all(isfinite.(Es)))
# ## l-2 costfunc.
# cost = zero(T)
# for m = 1:length(S_U)
#
# f_u = f(U_rad[m])
#
# cost += abs2( f_u - S_U[m] )
# end
#
# # faster l-2 cost compute.
# B = reinterpret(T, E_BLS)
# tmp = B*κ_BLS - b_BLS
# cost = dot(tmp, tmp)
cost = norm(reinterpret(T, E_BLS)*κ_BLS - b_BLS)^2 # compact version.
return cost
end
|
proofpile-julia0005-42702 | {
"provenance": "014.jsonl.gz:242703"
} | # This file was generated by the Julia Swagger Code Generator
# Do not modify this file directly. Modify the swagger specification instead.
mutable struct ApplicationGatewayBackendAddress2 <: SwaggerModel
fqdn::Any # spec type: Union{ Nothing, String } # spec name: fqdn
ipAddress::Any # spec type: Union{ Nothing, String } # spec name: ipAddress
function ApplicationGatewayBackendAddress2(;fqdn=nothing, ipAddress=nothing)
o = new()
validate_property(ApplicationGatewayBackendAddress2, Symbol("fqdn"), fqdn)
setfield!(o, Symbol("fqdn"), fqdn)
validate_property(ApplicationGatewayBackendAddress2, Symbol("ipAddress"), ipAddress)
setfield!(o, Symbol("ipAddress"), ipAddress)
o
end
end # type ApplicationGatewayBackendAddress2
const _property_map_ApplicationGatewayBackendAddress2 = Dict{Symbol,Symbol}(Symbol("fqdn")=>Symbol("fqdn"), Symbol("ipAddress")=>Symbol("ipAddress"))
const _property_types_ApplicationGatewayBackendAddress2 = Dict{Symbol,String}(Symbol("fqdn")=>"String", Symbol("ipAddress")=>"String")
Base.propertynames(::Type{ ApplicationGatewayBackendAddress2 }) = collect(keys(_property_map_ApplicationGatewayBackendAddress2))
Swagger.property_type(::Type{ ApplicationGatewayBackendAddress2 }, name::Symbol) = Union{Nothing,eval(Base.Meta.parse(_property_types_ApplicationGatewayBackendAddress2[name]))}
Swagger.field_name(::Type{ ApplicationGatewayBackendAddress2 }, property_name::Symbol) = _property_map_ApplicationGatewayBackendAddress2[property_name]
function check_required(o::ApplicationGatewayBackendAddress2)
true
end
function validate_property(::Type{ ApplicationGatewayBackendAddress2 }, name::Symbol, val)
end
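# A commented usage sketch for the generated model above; the field values are
# illustrative only.
# addr = ApplicationGatewayBackendAddress2(fqdn = "backend.example.com", ipAddress = "10.0.0.4")
# check_required(addr)                                              # true — no required fields
# Swagger.property_type(ApplicationGatewayBackendAddress2, :fqdn)   # Union{Nothing, String}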
|
proofpile-julia0005-42703 | {
"provenance": "014.jsonl.gz:242704"
} | export EinCode, EinIndexer, EinArray
export einarray
"""
EinCode
EinCode(ixs, iy)
Abstract type for sum-product contraction code.
The constructor returns a `DynamicEinCode` instance.
"""
abstract type EinCode end
"""
StaticEinCode{ixs, iy}
The static version of `DynamicEinCode` that matches the contraction rule at compile time.
It is the default return type of `@ein_str` macro.
"""
struct StaticEinCode{ixs, iy} <: EinCode end
getixs(::StaticEinCode{ixs}) where ixs = ixs
getiy(::StaticEinCode{ixs, iy}) where {ixs, iy} = iy
labeltype(::StaticEinCode{ixs,iy}) where {ixs, iy} = promote_type(eltype.(ixs)..., eltype(iy))
getixsv(code::StaticEinCode) = [collect(labeltype(code), ix) for ix in getixs(code)]
getiyv(code::StaticEinCode) = collect(labeltype(code), getiy(code))
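# A small sketch of the static accessors above, constructing the code for a
# matrix product directly (equivalent to what `ein"ij,jk->ik"` produces).
let
    code = StaticEinCode{(('i', 'j'), ('j', 'k')), ('i', 'k')}()
    getixs(code)      # (('i', 'j'), ('j', 'k'))
    getiyv(code)      # ['i', 'k']
    labeltype(code)   # Char
end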
"""
DynamicEinCode{LT}
DynamicEinCode(ixs, iy)
Wrapper to `eincode`-specification that creates a callable object
to evaluate the `eincode` `ixs -> iy` where `ixs` are the index-labels
of the input-tensors and `iy` are the index-labels of the output.
# example
```jldoctest; setup = :(using OMEinsum)
julia> a, b = rand(2,2), rand(2,2);
julia> OMEinsum.DynamicEinCode((('i','j'),('j','k')),('i','k'))(a, b) ≈ a * b
true
```
"""
struct DynamicEinCode{LT} <: EinCode
ixs::Vector{Vector{LT}}
iy::Vector{LT}
end
# to avoid ambiguity error, support tuple inputs
function DynamicEinCode(ixs, iy)
@debug "generating dynamic eincode ..."
if isempty(ixs)
error("number of input tensors must be greater than 0")
end
DynamicEinCode(_tovec(ixs, iy)...)
end
_tovec(ixs::NTuple{N,Tuple{}}, iy::Tuple{}) where {N} = [collect(Union{}, ix) for ix in ixs], collect(Union{}, iy)
_tovec(ixs::NTuple{N,NTuple{M,LT} where M}, iy::NTuple{K,LT}) where {N,K,LT} = [collect(LT, ix) for ix in ixs], collect(LT, iy)
_tovec(ixs::NTuple{N,Vector{LT}}, iy::Vector{LT}) where {N,LT} = collect(ixs), iy
_tovec(ixs::AbstractVector{Vector{LT}}, iy::AbstractVector{LT}) where {LT} = collect(ixs), collect(iy)
Base.:(==)(x::DynamicEinCode, y::DynamicEinCode) = x.ixs == y.ixs && x.iy == y.iy
# forward from EinCode, for compatibility
EinCode(ixs, iy) = DynamicEinCode(ixs, iy)
getixs(code::DynamicEinCode) = code.ixs
getiy(code::DynamicEinCode) = code.iy
labeltype(::DynamicEinCode{LT}) where LT = LT
getixsv(code::DynamicEinCode) = code.ixs
getiyv(code::DynamicEinCode) = code.iy
# conversion
DynamicEinCode(::StaticEinCode{ixs, iy}) where {ixs, iy} = DynamicEinCode(ixs, iy)
StaticEinCode(code::DynamicEinCode) = StaticEinCode{(Tuple.(code.ixs)...,), (code.iy...,)}()
"""
EinIndexer{locs,N}
A structure for indexing `EinArray`s. `locs` is the index positions (among all indices).
In the constructor, `size` is the size of target tensor,
"""
struct EinIndexer{locs,N}
cumsize::NTuple{N, Int}
end
"""
EinIndexer{locs}(size::Tuple)
Constructor for `EinIndexer` for an object of size `size` where `locs` are the
locations of relevant indices in a larger tuple.
"""
function EinIndexer{locs}(size::NTuple{N,Int}) where {N,locs}
N==0 && return EinIndexer{(),0}(())
EinIndexer{locs,N}((1,TupleTools.cumprod(size[1:end-1])...))
end
getlocs(::EinIndexer{locs,N}) where {N,locs} = locs
# get a subset of index
@inline subindex(indexer::EinIndexer, ind::CartesianIndex) = subindex(indexer, ind.I)
@inline subindex(indexer::EinIndexer{(),0}, ind::NTuple{N0,Int}) where N0 = 1
@inline @generated function subindex(indexer::EinIndexer{locs,N}, ind::NTuple{N0,Int}) where {N,N0,locs}
ex = Expr(:call, :+, map(i->i==1 ? :(ind[$(locs[i])]) : :((ind[$(locs[i])]-1) * indexer.cumsize[$i]), 1:N)...)
:(@inbounds $ex)
end
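# A small sketch of how an EinIndexer maps a full index tuple to the linear
# index of one tensor: locs = (2, 3) means the tensor's two dimensions sit at
# positions 2 and 3 of the full index tuple.
let
    indexer = EinIndexer{(2, 3)}((4, 5))   # for a tensor of size (4, 5)
    subindex(indexer, (9, 2, 3))           # == 2 + (3 - 1) * 4 == 10
end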
"""
EinArray{T, N, TT, LX, LY, ICT, OCT} <: AbstractArray{T, N}
A struct to hold the intermediate result of an `einsum` where all index-labels
of both input and output are expanded to a rank-`N`-array whose values
are lazily calculated.
Indices are arranged as _inner indices_ (or reduced dimensions) first and then _outer indices_.
Type parameters are
* `T`: element type,
* `N`: array dimension,
* `TT`: type of the tuple of input arrays,
* `LX`: type of the tuple of input indexers,
* `LY`: type of the output indexer,
* `ICT`: type of the inner CartesianIndices,
* `OCT`: type of the outer CartesianIndices,
"""
struct EinArray{T, N, TT, LX, LY, ICT, OCT} <: AbstractArray{T, N}
xs::TT
x_indexers::LX
y_indexer::LY
size::NTuple{N, Int}
ICIS::ICT
OCIS::OCT
function EinArray{T}(xs::TT, x_indexers::LX, y_indexer::LY, size::NTuple{N, Int}, ICIS::ICT, OCIS::OCT) where {T,N,TT<:Tuple,LX<:Tuple,LY<:EinIndexer,ICT,OCT}
new{T,N,TT,LX,LY,ICT,OCT}(xs,x_indexers,y_indexer,size,ICIS,OCIS)
end
end
"""
    einarray(::Val{ixs}, ::Val{iy}, xs, size_dict) -> EinArray
Constructor of `EinArray` from an `EinCode`, a tuple of tensors `xs` and a `size_dict` that assigns each index-label a size.
The returned `EinArray` holds an intermediate result of the `einsum` specified by the
`EinCode` with indices corresponding to all unique labels in the einsum.
Reduction over the (lazily calculated) dimensions that correspond to labels not present
in the output lead to the result of the einsum.
# example
```jldoctest; setup = :(using OMEinsum)
julia> using OMEinsum: get_size_dict
julia> a, b = rand(2,2), rand(2,2);
julia> sd = get_size_dict((('i','j'),('j','k')), (a, b));
julia> ea = OMEinsum.einarray(Val((('i','j'),('j','k'))),Val(('i','k')), (a,b), sd);
julia> dropdims(sum(ea, dims=1), dims=1) ≈ a * b
true
```
"""
@generated function einarray(::Val{ixs}, ::Val{iy}, xs::TT, size_dict) where {ixs, iy, NI, TT<:NTuple{NI,AbstractArray}}
inner_indices, outer_indices, locs_xs, locs_y = indices_and_locs(ixs, iy)
inner_indices = (inner_indices...,)
outer_indices = (outer_indices...,)
T = promote_type(eltype.(xs.parameters)...)
xind_expr = Expr(:call, :tuple, map(i->:(EinIndexer{$(locs_xs[i])}(size(xs[$i]))),1:NI)...)
quote
# find size for each leg
outer_sizes = getindex.(Ref(size_dict), $outer_indices)
inner_sizes = getindex.(Ref(size_dict), $inner_indices)
# cartesian indices for outer and inner legs
outer_ci = CartesianIndices(outer_sizes)
inner_ci = CartesianIndices(inner_sizes)
x_indexers = $(xind_expr)
y_size = getindex.(Ref(size_dict), iy)
y_indexer = EinIndexer{$locs_y}(y_size)
#EinArray{$T, $N, $TT, $LX, $LY, $ICT, $OCT}(xs, x_indexers, y_indexer, (inner_sizes...,outer_sizes...), inner_ci, outer_ci)
EinArray{$T}(xs, x_indexers, y_indexer, (inner_sizes...,outer_sizes...), inner_ci, outer_ci)
end
end
Base.size(A::EinArray) = A.size
@doc raw"
getindex(A::EinArray, inds...)
return the lazily calculated entry of `A` at index `inds`.
"
@inline Base.getindex(A::EinArray{T}, ind) where {T} = map_prod(A.xs, ind, A.x_indexers)
@inline Base.getindex(A::EinArray{T}, inds::Int...) where {T} = map_prod(A.xs, inds, A.x_indexers)
# get one element from each tensor, and multiple them
@doc raw"
map_prod(xs, ind, indexers)
calculate the value of an `EinArray` with `EinIndexer`s `indexers` at location `ind`.
"
@inline @generated function map_prod(xs::Tuple, ind, indexers::NTuple{N,Any}) where {N}
ex = Expr(:call, :*, map(i->:(xs[$i][subindex(indexers[$i], ind)]), 1:N)...)
:(@inbounds $ex)
end
@doc raw"
indices_and_locs(ixs,iy)
given the index-labels of input and output of an `einsum`, return
(in the same order):
- a tuple of the distinct index-labels in `ixs` of the input that do not appear in the output `iy` (the inner indices)
- a tuple of the distinct index-labels of the output `iy` (the outer indices)
- a tuple of tuples with the locations of each input's index-labels in the list of all index-labels
- a tuple of the locations of the index-labels of `iy` in the list of outer index-labels
where the list of all index-labels is simply the inner indices catenated with the outer indices.
"
function indices_and_locs(ixs, iy)
# outer legs and inner legs
outer_indices = unique!(collect(iy))
inner_indices = setdiff!(collect(vcat(_collect.(ixs)...)), outer_indices)
# for indexing tensors (leg binding)
indices = (inner_indices...,outer_indices...)
locs_xs = map(ixs) do ix
map(i->findfirst(isequal(i), indices), ix)
end
locs_y = map(i->findfirst(isequal(i), outer_indices), iy)
return inner_indices, outer_indices, locs_xs, locs_y
end
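# A commented example for a matrix product (the call relies on the
# package-internal `_collect` helper defined elsewhere in OMEinsum):
# inner, outer, locs_xs, locs_y = indices_and_locs((('i','j'), ('j','k')), ('i','k'))
# inner   == ['j']            # the summed label
# outer   == ['i', 'k']       # the output labels
# locs_xs == ((2, 1), (1, 3)) # positions in the full label list ('j', 'i', 'k')
# locs_y  == (1, 2)           # positions of 'i', 'k' among the outer labels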
# dynamic EinIndexer
struct DynamicEinIndexer{N}
locs::NTuple{N,Int}
cumsize::NTuple{N, Int}
end
function dynamic_indexer(locs::NTuple{N,Int}, size::NTuple{N,Int}) where {N}
N==0 && return DynamicEinIndexer{0}((),())
DynamicEinIndexer{N}(locs, (1,TupleTools.cumprod(size[1:end-1])...))
end
getlocs(d::DynamicEinIndexer{N}) where {N} = d.locs
# get a subset of index
@inline subindex(indexer::DynamicEinIndexer, ind::CartesianIndex) = subindex(indexer, ind.I)
@inline subindex(indexer::DynamicEinIndexer{0}, ind::NTuple{N0,Int}) where N0 = 1
@inline @generated function subindex(indexer::DynamicEinIndexer{N}, ind::NTuple{N0,Int}) where {N,N0}
ex = Expr(:call, :+, map(i->i==1 ? :(ind[indexer.locs[$i]]) : :((ind[indexer.locs[$i]]-1) * indexer.cumsize[$i]), 1:N)...)
:(@inbounds $ex)
end
|
proofpile-julia0005-42704 | {
"provenance": "014.jsonl.gz:242705"
} | """
This set of functions queries if a given coordinate/grid is within a region defined either
by a set of region [N,S,E,W] bounds or by longitude/latitude vectors.
Other features include:
* Transformation of longitude coordinates from [-180,180] to [0,360] and vice versa
"""
## Is Point in GeoRegion?
"""
isinGeoRegion(
Point :: Point2{<:Real},
geo :: GeoRegion;
tlon :: Real = 0,
tlat :: Real = 0,
throw :: Bool = true
) -> Bool
Check if a geographical point `Point` is within a GeoRegion defined by `geo`.
Arguments
=========
- `Point` : A geographical point of Type `Point2`. Pass `Point2(plon,plat)`, where `plon` and `plat` are the longitude and latitudes of the point.
- `geo` : The GeoRegion struct container
Keyword Arguments
=================
- `tlon` : Threshold for the longitude bound
- `tlat` : Threshold for the latitude bound
- `throw` : If `true`, then if `Point` is not within `geo`, an error is thrown and the program stops running.
"""
function isinGeoRegion(
Point :: Point2{<:Real},
GeoReg :: RectRegion;
tlon :: Real = 0,
tlat :: Real = 0,
throw :: Bool = true
)
if throw
@info "$(modulelog()) - Performing a check to determine if the coordinates $(Point) are within the specified region boundaries."
end
N = GeoReg.N
S = GeoReg.S
E = GeoReg.E
W = GeoReg.W
plon = Point[1]; plon = mod(plon,360)
plat = Point[2]
is180 = GeoReg.is180
is360 = GeoReg.is360
isin = true
if (plat < (S-tlat)) || (plat > (N+tlat))
isin = false
else
if is360
if !is180
if (plon < (W-tlon)) || (plon > (E+tlon)); isin = false end
else
if (plon < (W-tlon)) || (plon > (E+tlon))
plon = plon - 360
if (plon < (W-tlon)) || (plon > (E+tlon))
isin = false
end
end
end
else
if plon > 180; plon = plon - 360 end
            if (plon < (W-tlon)) || (plon > (E+tlon))
                isin = false
            end
end
end
if !isin
if throw
error("$(modulelog()) - The requested coordinates $(Point2(plon,plat)) are not within the specified region boundaries.")
else return false
end
else
if throw
@info "$(modulelog()) - The requested coordinates $(Point2(plon,plat)) are within the specified region boundaries."
end
return true
end
end
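# A commented usage sketch; retrieving a predefined region via `GeoRegion(regID)`
# is assumed from the wider package, and the ID and coordinates are illustrative.
# geo = GeoRegion("GLB")
# isinGeoRegion(Point2(105.0, 3.0), geo)                 # true, or throws if outside
# isinGeoRegion(Point2(105.0, 3.0), geo, throw = false)  # returns a Bool instead of erroring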
function isinGeoRegion(
Point :: Point2{<:Real},
GeoReg :: PolyRegion;
tlon :: Real = 0,
tlat :: Real = 0,
throw :: Bool = true
)
if throw
@info "$(modulelog()) - Performing a check to determine if the coordinates $(Point) are within the specified region boundaries."
end
N = GeoReg.N
S = GeoReg.S
E = GeoReg.E
W = GeoReg.W
GeoShape = GeoReg.shape
plon = Point[1]; plon = mod(plon,360)
plat = Point[2]
is180 = GeoReg.is180
is360 = GeoReg.is360
isin = true
if (plat < (S-tlat)) || (plat > (N+tlat))
isin = false
else
if is360
if !is180
if (plon < (W-tlon)) || (plon > (E+tlon)); isin = false end
else
if (plon < (W-tlon)) || (plon > (E+tlon))
plon = plon - 360
if (plon < (W-tlon)) || (plon > (E+tlon))
isin = false
end
end
end
else
if plon > 180; plon = plon - 360 end
            if (plon < (W-tlon)) || (plon > (E+tlon))
                isin = false
            end
end
end
if isin
isin = inpolygon(Point2(plon,plat),GeoReg.shape,in=true,on=true,out=false)
end
if !isin
if throw
error("$(modulelog()) - The requested coordinates $(Point2(plon,plat)) are not within the specified region boundaries.")
else
return false
end
else
if throw
@info "$(modulelog()) - The requested coordinates $(Point2(plon,plat)) are within the specified region boundaries."
end
return true
end
end
"""
isinGeoRegion(
Child :: GeoRegion,
polyG :: PolyRegion;
domask :: Bool = false,
throw :: Bool = true
) -> Bool
Check if a child GeoRegion defined by `Child` is within a PolyRegion `polyG`.
Arguments
=========
- `Child` : A GeoRegion that we postulate to be a "child", or a subset of the GeoRegion defined by `polyG`
- `polyG` : A GeoRegion that we postulate to be a "parent", or containing the GeoRegion defined by `Child`
Keyword Arguments
=================
- `throw` : If `true`, then if `Child` is not within `polyG`, an error is thrown and the program stops running
- `domask` : If `throw` is `false` and `domask` is `true`, return a mask (with bounds defined by the `Child` GeoRegion) showing the region where `Child` and `polyG` do not overlap
"""
function isinGeoRegion(
Child :: GeoRegion,
polyG :: PolyRegion;
domask :: Bool = false,
throw :: Bool = true
)
@info "$(modulelog()) - Performing a check to determine if the $(Child.name) GeoRegion ($(Child.regID)) is inside the $(polyG.name) GeoRegion ($(polyG.regID))"
N = Child.N
S = Child.S
E = Child.E
W = Child.W
lon = range(W,E,length=10001); nlon = length(lon)
lat = range(S,N,length=10001); nlat = length(lat)
mask = zeros(Bool,nlon,nlat)
for ilat in 1 : nlat, ilon = 1 : nlon
if isinGeoRegion(Point2(lon[ilon],lat[ilat]),Child,throw=false)
if !isinGeoRegion(Point2(lon[ilon],lat[ilat]),polyG,throw=false)
mask[ilon,ilat] = 1
end
end
end
if sum(mask) > 0
if throw
error("$(modulelog()) - The GeoRegion $(Child.regID) ($(Child.name)) is not a subset of the GeoRegion $(polyG.regID) ($(polyG.name))")
else
if domask
@warn "$(modulelog()) - The GeoRegion $(Child.regID) ($(Child.name)) is not a subset of the GeoRegion $(polyG.regID) ($(polyG.name)), returning a mask to show which regions do not intersect"
return mask
else
@warn "$(modulelog()) - The GeoRegion $(Child.regID) ($(Child.name)) is not a subset of the GeoRegion $(polyG.regID) ($(polyG.name))"
return false
end
end
else
@info "$(modulelog()) - The GeoRegion $(Child.regID) ($(Child.name)) is indeed a subset of the GeoRegion $(polyG.regID) ($(polyG.name))"
return true
end
end
"""
isinGeoRegion(
Child :: GeoRegion,
rectG :: RectRegion;
throw :: Bool = true
) -> Bool
Check if a child GeoRegion defined by `Child` is within a RectRegion `rectG`.
Arguments
=========
- `Child` : A GeoRegion that we postulate to be a "child", or a subset of the GeoRegion defined by `rectG`
- `rectG` : A GeoRegion that we postulate to be a "parent", or containing the GeoRegion defined by `Child`
Keyword Arguments
=================
- `throw` : If `true`, then if `Child` is not within `rectG`, an error is thrown and the program stops running
"""
function isinGeoRegion(
Child :: GeoRegion,
rectG :: RectRegion;
throw :: Bool = true
)
@info "$(modulelog()) - Performing a check to determine if the $(Child.name) GeoRegion ($(Child.regID)) is inside the $(rectG.name) GeoRegion ($(rectG.regID))"
isin = isgridinregion(
[Child.N,Child.S,Child.E,Child.W],
[rectG.N,rectG.S,rectG.E,rectG.W],
throw=false
)
if !isin
if throw
error("$(modulelog()) - The GeoRegion $(Child.regID) ($(Child.name)) is not a subset of the GeoRegion $(rectG.regID) ($(rectG.name))")
else
@warn "$(modulelog()) - The GeoRegion $(Child.regID) ($(Child.name)) is not a subset of the GeoRegion $(rectG.regID) ($(rectG.name))"
return false
end
else
@info "$(modulelog()) - The GeoRegion $(Child.regID) ($(Child.name)) is indeed a subset of the GeoRegion $(rectG.regID) ($(rectG.name))"
return true
end
end
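# A commented sketch of the parent/child check with hypothetical region IDs;
# both regions would normally come from the package's `GeoRegion(regID)` lookup.
# child  = GeoRegion("AR6_SEA")    # hypothetical child ID
# parent = GeoRegion("GLB")        # hypothetical parent ID
# isinGeoRegion(child, parent, throw = false)   # true if child lies inside parent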
|
proofpile-julia0005-42705 | {
"provenance": "014.jsonl.gz:242706"
} | function set_res(Delta_Qcon_hat, Delta_Qcon_hat_temp, cellxmax, cellymax, cellzmax)
res = zeros(cellxmax, cellymax, cellzmax, 5)
for i in 2:cellxmax-1
for j in 2:cellymax-1
for k in 2:cellzmax-1
for l in 1:5
res[i,j,k,l] = abs(Delta_Qcon_hat[i,j,k,l]-Delta_Qcon_hat_temp[i,j,k,l])
end
end
end
end
# println(res[3,3,:])
# println(dt*RHS[3,3,:])
return res
end
function check_converge(res, RHS, cellxmax, cellymax, cellzmax, init_small)
norm2 = zeros(5)
tempAxb = zeros(5)
tempb = zeros(5)
for i in 2:cellxmax-1
for j in 2:cellymax-1
for k in 2:cellzmax-1
for l in 1:5
tempAxb[l] = tempAxb[l] + res[i,j,k,l]^2
tempb[l] = tempb[l] + RHS[i,j,k,l]^2
end
end
end
end
#println(tempb)
#println(tempAxb)
#throw(UndefVarError(:x))
for l in 1:5
norm2[l] = (tempAxb[l]/(tempb[l]+init_small)) ^ 0.5
end
return norm2
end |
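# A tiny synthetic check of the helpers above: identical update fields give a
# zero residual, hence zero convergence norms. The grid size and values are
# illustrative only.
let
    nx, ny, nz = 4, 4, 4
    dq  = ones(nx, ny, nz, 5)
    RHS = ones(nx, ny, nz, 5)
    res = set_res(dq, dq, nx, ny, nz)               # all zeros
    check_converge(res, RHS, nx, ny, nz, 1.0e-50)   # ≈ zeros(5)
end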
proofpile-julia0005-42706 | {
"provenance": "014.jsonl.gz:242707"
} | module ElectricFields
using LinearAlgebra
using StaticArrays
using Unitful
using UnitfulAtomic
# using FFTW
using ForwardDiff
using Optim
using IntervalSets
import IntervalSets: duration
using Parameters
import Base: show
using Formatting
include("units.jl")
include("rotations.jl")
include("relation_dsl.jl")
include("field_types.jl")
include("time_axis.jl")
include("carriers.jl")
include("envelopes.jl")
include("arithmetic.jl")
include("field_dsl.jl")
# include("sellmeier.jl")
# include("dispersed_fields.jl")
include("strong_field_properties.jl")
end # module
|
proofpile-julia0005-42707 | {
"provenance": "014.jsonl.gz:242708"
} | export iauSxp
"""
Multiply a p-vector by a scalar.
This function is part of the International Astronomical Union's
SOFA (Standards Of Fundamental Astronomy) software collection.
Status: vector/matrix support function.
Given:
s double scalar
p double[3] p-vector
Returned:
sp double[3] s * p
Note:
It is permissible for p and sp to be the same array.
This revision: 2013 June 18
SOFA release 2018-01-30
Copyright (C) 2018 IAU SOFA Board. See notes at end.
"""
# void iauSxp(double s, double p[3], double sp[3])
function iauSxp(s::Real, p::AbstractVector{<:Real})
sp = zeros(Float64, 3)
ccall((:iauSxp, libsofa_c), Cvoid,
(Cdouble, Ptr{Cdouble}, Ptr{Cdouble}),
convert(Float64, s),
convert(Array{Float64, 1}, p),
sp)
return SVector{3}(sp)
end |
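# A commented usage sketch (requires the SOFA C library wrapped above):
# iauSxp(2.0, [1.0, 2.0, 3.0])   # -> SVector{3}(2.0, 4.0, 6.0)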
proofpile-julia0005-42708 | {
"provenance": "014.jsonl.gz:242709"
} | # This file was generated, do not modify it. # hide
y, X = unpack(caravan, ==(:Purchase), col->true)
std = machine(Standardizer(), X)
fit!(std)
Xs = transform(std, X)
var(Xs[:,1]) |
proofpile-julia0005-42709 | {
"provenance": "014.jsonl.gz:242710"
} | module StochasticVolatility
using ArgCheck
using Distributions
using Parameters
using DynamicHMC
using StatsBase
using Base.Test
using ContinuousTransformations
export StochasticVolatility
"""
    simulate_stochastic(ρ, σ, ϵs, νs)
Take in the parameter values (ρ, σ) for the latent volatility process, the errors ϵs used for the measurement equation and the errors νs used for the transition equation.
The discrete-time version of the Ornstein-Uhlenbeck stochastic volatility model:
y_t = x_t + log(ϵ_t) + 1.27 where ϵ_t ∼ χ^2(1)
x_t = ρ * x_(t-1) + σ * ν_t where ν_t ∼ N(0, 1)
"""
function simulate_stochastic(ρ, σ, ϵs, νs)
N = length(ϵs)
@argcheck N == length(νs)
x₀ = νs[1]*σ*(1 - ρ^2)^(-0.5)
xs = Vector{typeof(x₀)}(N)
for i in 1:N
xs[i] = (i == 1) ? x₀ : (ρ*xs[i-1] + σ*νs[i])
end
xs + log.(ϵs) + 1.27
end
simulate_stochastic(ρ, σ, N) = simulate_stochastic(ρ, σ, rand(Chisq(1), N), randn(N))
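# A commented simulation sketch; the parameter values (ρ = 0.9, σ = 0.5) and the
# sample length are illustrative only.
# ys = simulate_stochastic(0.9, 0.5, 100)   # 100 simulated observations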
struct StochasticVolatility{T, Prior_ρ, Prior_σ, Ttrans}
"observed data"
ys::Vector{T}
"prior for ρ (persistence)"
prior_ρ::Prior_ρ
"prior for σ_v (volatility of volatility)"
prior_σ::Prior_σ
"χ^2 draws for simulation"
ϵ::Vector{T}
"Normal(0,1) draws for simulation"
ν::Vector{T}
"Transformations cached"
transformation::Ttrans
end
## In this form, we use an AR(2) process of the first differences with an intercept as the auxiliary model.
"""
lag(xs, n, K)
Lag-`n` operator on vector `xs` (maximum `K` lags).
"""
lag(xs, n, K) = xs[((K+1):end)-n]
"""
lag_matrix(xs, ns, K = maximum(ns))
Matrix with differently lagged xs.
"""
function lag_matrix(xs, ns, K = maximum(ns))
M = Matrix{eltype(xs)}(length(xs)-K, maximum(ns))
for i ∈ ns
M[:, i] = lag(xs, i, K)
end
M
end
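# A commented illustration of the lag helpers with K = 2 lags of 1:5:
# lag(collect(1:5), 1, 2)          # == [2, 3, 4]   (x_{t-1} aligned with t = 3:5)
# lag_matrix(collect(1:5), 1:2, 2) # == [2 1; 3 2; 4 3]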
"first auxiliary regression y, X, meant to capture first differences"
function yX1(zs, K)
Δs = diff(zs)
lag(Δs, 0, K), hcat(lag_matrix(Δs, 1:K, K), ones(eltype(zs), length(Δs)-K), lag(zs, 1, K+1))
end
"second auxiliary regression y, X, meant to capture levels"
function yX2(zs, K)
lag(zs, 0, K), hcat(ones(eltype(zs), length(zs)-K), lag_matrix(zs, 1:K, K))
end
"Constructor which calculates cached values and buffers. Use this unless you know what you are doing."
ℝto(dist::Uniform) = transformation_to(Segment(minimum(dist), maximum(dist)), LOGISTIC)
ℝto(::InverseGamma) = transformation_to(ℝ⁺)
"""
StochasticVolatility(ys, prior_ρ, prior_σ, M)
Convenience constructor for StochasticVolatility.
Take in the observed data, the priors, and number of simulations (M).
"""
function StochasticVolatility(ys, prior_ρ, prior_σ, M)
transformation = TransformationTuple((ℝto(prior_ρ), (ℝto(prior_σ))))
StochasticVolatility(ys, prior_ρ, prior_σ, rand(Chisq(1), M), randn(M), transformation)
end
function (pp::StochasticVolatility)(θ)
@unpack ys, prior_ρ, prior_σ, ν, ϵ, transformation = pp
ρ, σ = transformation(θ)
logprior = logpdf(prior_ρ, ρ) + logpdf(prior_σ, σ)
N = length(ϵ)
# Generating xs, which is the latent volatility process
xs = simulate_stochastic(ρ, σ, ϵ, ν)
Y_1, X_1 = yX1(xs, 2)
β₁ = qrfact(X_1, Val{true}) \ Y_1
v₁ = mean(abs2, Y_1 - X_1*β₁)
Y_2, X_2 = yX2(xs, 2)
β₂ = qrfact(X_2, Val{true}) \ Y_2
v₂ = mean(abs2, Y_2 - X_2*β₂)
# We work with first differences
y₁, X₁ = yX1(ys, 2)
log_likelihood1 = sum(logpdf.(Normal(0, √v₁), y₁ - X₁ * β₁))
y₂, X₂ = yX2(ys, 2)
log_likelihood2 = sum(logpdf.(Normal(0, √v₂), y₂ - X₂ * β₂))
logprior + log_likelihood1 + log_likelihood2
end
"""
path(parts...)
`parts...` appended to the directory containing `src/` etc. Useful for saving graphs and data.
"""
path(parts...) = normpath(joinpath(splitdir(Base.find_in_path("StochasticVolatility"))[1], "..", parts...))
end
|
proofpile-julia0005-42710 | {
"provenance": "014.jsonl.gz:242711"
} | using ArgParse
using JLD2
parser = ArgParseSettings(allow_ambiguous_opts=false)
@add_arg_table! parser begin
"main"
required = true
"patch"
required = true
"--overwrite"
arg_type = Bool
default = true
end
args = parse_args(parser)
results = load(args["main"])["results"];
patch = load(args["patch"])["results"];
for (noise, sigscan) in patch
for (signal, data) in sigscan
(signal in keys(results[noise])) == args["overwrite"] || error("check overwrite for results[$noise][$signal]")
results[noise][signal] = data
println("including results[$noise][$signal]")
end
end
save(args["main"], "results", results)
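# Example invocation (hypothetical file names):
#   julia patch.jl main_results.jld2 patch_results.jld2 --overwrite true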
|
proofpile-julia0005-42711 | {
"provenance": "014.jsonl.gz:242712"
} | using OrdinaryDiffEq, Zygote, DiffEqSensitivity, Test
p_model = [1f0]
u0 = Float32.([0.0])
function dudt(du, u, p, t)
du[1] = p[1]
end
prob = ODEProblem(dudt,u0,(0f0,99.9f0))
function predict_neuralode(p)
_prob = remake(prob,p=p)
Array(solve(_prob,Tsit5(), saveat=0.1))
end
loss(p) = sum(abs2,predict_neuralode(p))/length(p)
p_model_ini = copy(p_model)
@test !iszero(Zygote.gradient(loss,p_model_ini)[1])
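# For this linear-in-p problem the gradient has a closed form, which gives a
# handy sanity check (a commented sketch, not part of the original test):
# ts = 0f0:0.1f0:99.9f0
# analytic = 2f0 * p_model_ini[1] * sum(abs2, ts)
# Zygote.gradient(loss, p_model_ini)[1][1] ≈ analytic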
|
proofpile-julia0005-42712 | {
"provenance": "014.jsonl.gz:242713"
} | struct TanYam7ConstantCache{T,T2} <: OrdinaryDiffEqConstantCache
c1::T2
c2::T2
c3::T2
c4::T2
c5::T2
c6::T2
c7::T2
a21::T
a31::T
a32::T
a41::T
a43::T
a51::T
a53::T
a54::T
a61::T
a63::T
a64::T
a65::T
a71::T
a73::T
a74::T
a75::T
a76::T
a81::T
a83::T
a84::T
a85::T
a86::T
a87::T
a91::T
a93::T
a94::T
a95::T
a96::T
a97::T
a98::T
a101::T
a103::T
a104::T
a105::T
a106::T
a107::T
a108::T
b1::T
b4::T
b5::T
b6::T
b7::T
b8::T
b9::T
btilde1::T
btilde4::T
btilde5::T
btilde6::T
btilde7::T
btilde8::T
btilde9::T
btilde10::T
end
"""
On the Optimization of Some Nine-Stage Seventh-order Runge-Kutta Method, by M. Tanaka, S. Muramatsu and S. Yamashita,
Information Processing Society of Japan, Vol. 33, No. 12 (1992) pages 1512-1526.
"""
function TanYam7ConstantCache(::Type{T},::Type{T2}) where {T<:CompiledFloats,T2<:CompiledFloats}
c1 =T2(0.07816646510113846)
c2 =T2(0.1172496976517077)
c3 =T2(0.17587454647756157)
c4 =T2(0.4987401101913988)
c5 =T2(0.772121690184484)
c6 =T2(0.9911856696047768)
c7 =T2(0.9995019582097662)
a21 =T(0.07816646510113846)
a31 =T(0.029312424412926925)
a32 =T(0.08793727323878078)
a41 =T(0.04396863661939039)
a43 =T(0.13190590985817116)
a51 =T(0.7361834837738382)
a53 =T(-2.8337999624233303)
a54 =T(2.5963565888408913)
a61 =T(-12.062819391370866)
a63 =T(48.20838100175243)
a64 =T(-38.05863046463434)
a65 =T(2.6851905444372632)
a71 =T(105.21957276320198)
a73 =T(-417.92888626241256)
a74 =T(332.3155504499333)
a75 =T(-19.827591183572938)
a76 =T(1.2125399024549859)
a81 =T(114.67755718631742)
a83 =T(-455.5612169896097)
a84 =T(362.24095553923144)
a85 =T(-21.67190442182809)
a86 =T(1.3189132007137807)
a87 =T(-0.0048025566150346555)
a91 =T(115.21334870553768)
a93 =T(-457.69356568613233)
a94 =T(363.93688218862735)
a95 =T(-21.776682078900294)
a96 =T(1.3250670887878468)
a97 =T(-0.004518190986768983)
a98 =T(-0.0005320269334859959)
a101 =T(115.18928245800194)
a103 =T(-457.598022271643)
a104 =T(363.8610256312148)
a105 =T(-21.77212754027556)
a106 =T(1.3248804645074317)
a107 =T(-0.0045057252106918315)
a108 =T(-0.0005330165949429136)
b1 =T(0.05126014249744686)
b4 =T(0.27521638456212627)
b5 =T(0.33696650340710543)
b6 =T(0.18986072244906577)
b7 =T(8.461099418514403)
b8 =T(-130.15941672640542)
b9 =T(121.84501355497527)
# bhat1 =T(0.051002417590377186)
# bhat4 =T(0.27613929504666546)
# bhat5 =T(0.333788602069686)
# bhat6 =T(0.20196531139081392)
# bhat7 =T(5.755075459041811)
# bhat8 =T(-85.61797108513936)
# bhat10=T(80)
btilde1 =T(-0.0002577249070696835)
btilde4 =T(0.0009229104845391819)
btilde5 =T(-0.0031779013374194105)
btilde6 =T(0.01210458894174817)
btilde7 =T(-2.706023959472591)
btilde8 =T(44.541445641266066)
btilde9 =T(-121.84501355497527)
btilde10=T(80)
TanYam7ConstantCache(c1,c2,c3,c4,c5,c6,c7,a21,a31,a32,a41,a43,a51,a53,a54,a61,a63,a64,a65,a71,a73,a74,a75,a76,a81,a83,a84,a85,a86,a87,a91,a93,a94,a95,a96,a97,a98,a101,a103,a104,a105,a106,a107,a108,b1,b4,b5,b6,b7,b8,b9,btilde1,btilde4,btilde5,btilde6,btilde7,btilde8,btilde9,btilde10)
end
"""
On the Optimization of Some Nine-Stage Seventh-order Runge-Kutta Method, by M. Tanaka, S. Muramatsu and S. Yamashita,
Information Processing Society of Japan, Vol. 33, No. 12 (1992) pages 1512-1526.
"""
function TanYam7ConstantCache(T::Type,T2::Type)
c1 =T2(36259//463869)
c2 =T2(36259//309246)
c3 =T2(36259//206164)
c4 =T2(76401//153188)
c5 =T2(5164663400901569152//6688924124988083687)
c6 =T2(3486//3517)
c7 =T2(44151//44173)
a21 =T(36259//463869)
a31 =T(36259//1236984)
a32 =T(36259//412328)
a41 =T(36259//824656)
a43 =T(108777//824656)
a51 =T(17751533712975975593187//24112920357813127230992)
a53 =T(-68331192803887602162951//24112920357813127230992)
a54 =T(7825717455900471140481//3014115044726640903874)
a61 =T(-parse(BigInt,"2425518501234340256175806929031336393991001205323654593685210322030691047097621496102266496")//parse(BigInt,"201073929944556265242953373967503382318096046559546854970564286270157897072030532387737241"))
a63 =T(parse(BigInt,"126875939114499086848675646731069753055308007638565564293214808307459627250976287910912")//parse(BigInt,"2631823273838775215546306644775636213113650954300949659959480717139276934490785884841"))
a64 =T(-parse(BigInt,"18238165682427587123600563411903599919711680222699338744428834349094610403849667513626245575680")//parse(BigInt,"479212348415302218688412787744011607018072627155280851781784515530195350673833210517995172867"))
a65 =T(parse(BigInt,"74777425357290689294313120787550134201356775453356604582280658347816977407509825814840320")//parse(BigInt,"27848089034948481594251542168496834020714916243735255636715495143105044359669539013058107"))
a71 =T(parse(BigInt,"42210784012026021620512889337138957588173072058924928398799062235")//parse(BigInt,"401168555464694196502745570125544252560955194769351196028554688"))
a73 =T(-parse(BigInt,"53537582181289418572806048482253962781541488")//parse(BigInt,"128102133978061070595749084326726258918069"))
a74 =T(parse(BigInt,"6373437319382536771018620806214785516542915567996760353063349991182871200304")//parse(BigInt,"19178871740288180724887392022898914045213833131528843480576173243533301485"))
a75 =T(-parse(BigInt,"836513109281956728811652083904588515347012294160401579661057793958992")//parse(BigInt,"42189346226535262916910956145917457264775063492307360825161811325023"))
a76 =T(parse(BigInt,"10038768138260655813133796321688310283082351149893792474426644227234755871856831386997923013888351")//parse(BigInt,"8279123943002224665888560854425725483235895533066047643118716510648226939201056966728652698557760"))
a81 =T(parse(BigInt,"1454976871505621321312348899226731229297985195430097820532172928754404221419640982320963761")//parse(BigInt,"12687546780768188413911065021432924447284583965992535848754097389537051103097048673168256"))
a83 =T(-parse(BigInt,"1452249436938195913836212549773886207822959770792")//parse(BigInt,"3187825000852340545619892931005470986913487349"))
a84 =T(parse(BigInt,"3193785703967379485471835519262043520640585789136428552340853315619929163223926155626278646291801931779256")//parse(BigInt,"8816743814108800069900425523882492176796603795861854625575345408990649746129323017714575203134405597571"))
a85 =T(-parse(BigInt,"314398569508916946629277462588835135011587938712337655816458752800894863689255534896547161759213480")//parse(BigInt,"14507196201560052990013371105817112064769849230048646555812475120383456376679192045076337148816813"))
a86 =T(parse(BigInt,"5021633516852870452803558794670341128133410978274753232000155240629688617274518068065484524425884625107263111090060721584249881611265924113")//parse(BigInt,"3807402575192378287101053794016079417728266285278436439472658972755893033722804748992796724254152818232996309281540415603729279478920107136"))
a87 =T(-parse(BigInt,"894451839895008223904010765658125850176064186717638397881061173697811879745")//parse(BigInt,"186244934020117483847289332768639722211239803963523669807238114327710091115676"))
a91 =T(parse(BigInt,"152015786770038627019906826956584678402371493198250158080970494807155603994339")//parse(BigInt,"1319428594672311986480108760138089275639618425553698631283119461253421932416"))
a93 =T(-parse(BigInt,"19887569115365707672105043997835466942389220328")//parse(BigInt,"43451712251082409470704235239058276887205131"))
a94 =T(parse(BigInt,"6298831527954572673520838478029639446424615570453903300371170696118960335541193275024146681623960")//parse(BigInt,"17307483347318198085207889427954666589398911583434527253470846782562794571553580157056644256313"))
a95 =T(-parse(BigInt,"16267621644623777942279856217571823792451732234540266142050307930357537283432611648312520")//parse(BigInt,"747020211145282116967827947968990352912884924402891384654470989583659988117513448655559"))
a96 =T(parse(BigInt,"491920517345271821393960134665582163547632868347911487496995665146055538579545277983570189994492481977206720065882583432234119698425636137169515")//parse(BigInt,"371241970695441505578374965290296000309261530083026613438333515399198575394818137422626328203755084156959422247928840402063855870066548878130304"))
a97 =T(-parse(BigInt,"17535891839112183607157943692398769696531153719141528498448224128785868799210475")//parse(BigInt,"3881175428498724209649715816699297677268154716152409333146177577349474565697791732"))
a98 =T(-parse(BigInt,"31140449219386755112730831706895080247696102690585728771850210691242594436100540310")//parse(BigInt,"58531715707220748822628340615174217489020037018063169180406742622693159384762890406389"))
a101 =T(parse(BigInt,"24861126512935523838485032295435745281790804119672244200744512677831357181363")//parse(BigInt,"215828469302253893975010055544246846578750854407392771457340001283636121600"))
a103 =T(-76626859319946149305867456329803//167454524692981091214376557800)
a104 =T(parse(BigInt,"257532657386915224604779230484778835596042580268896440943054087972106955277512448850995064336363")//parse(BigInt,"707777528357579864776572552477247532276956780876653359042572831013312547307465249178438602200"))
a105 =T(-parse(BigInt,"103092665221253777021612043042409780416654274677686197534469014507504059634284484983141143")//parse(BigInt,"4735075386204034224907103653335170134874540866215348781137359896717512695961598377363000"))
a106 =T(parse(BigInt,"1318945254307068672853031172410281620677291556423152759282406612372948205789241763483098989903852936890735513699395545618802215742952753372919")//parse(BigInt,"995520191927224509158660659519643916330847017611189618002256023928790665495276022949114110343406997764203331763292012060684160018593393766400"))
a107 =T(-parse(BigInt,"2175691361381933486174620849991740173349017185199505364607841")//parse(BigInt,"482872625303278742130341621563226511344221688759361797916327450"))
a108 =T(-parse(BigInt,"11327601987184122343710458559595782081610122892585097")//parse(BigInt,"21251874884678431935286330856983429378055579208005268000"))
b1 =T(parse(BigInt,"677260699094873524061210073954310211")//parse(BigInt,"13212228177645157882237395248920447488"))
b4 =T(parse(BigInt,"5627843976805934592544586970647029617399366281651959837492864")//parse(BigInt,"20448796992082885248862284273169726631726393791864145954479875"))
b5 =T(parse(BigInt,"1359735671458057021603668186882234273947181034928034734244224")//parse(BigInt,"4035225037829041960922838374222759264846456609494840689395475"))
b6 =T(parse(BigInt,"3575764371063841994042920363615768888383369782579963896064642431626191680598750790399139608006651160426580137040859330533720256407")//parse(BigInt,"18833618269956378326078572170759846509476617594300797062242096554507068838086062412372695473217373611870290738365243380652826304000"))
b7 =T(parse(BigInt,"14322850798205614664394883796805489119964080948503151")//parse(BigInt,"1692788382425178679633337406927131793062126418747780"))
b8 =T(-parse(BigInt,"16735096417960349589058935251250023138290806176584545269411")//parse(BigInt,"128573843052304513208482301684749747737236254208431871400"))
b9 =T(33050288141543277444692395096256051//271248590133163812341791503489000)
# bhat1 =T(parse(BigInt,"962650826879437817605721930727384851")//parse(BigInt,"18874611682350225546053421784172067840"))
# bhat4 =T(parse(BigInt,"99703652969826806275610089806158069716600653757297413344")//parse(BigInt,"361062893830367886445877712954352019629670588714825566425"))
# bhat5 =T(parse(BigInt,"17540887447270394964911517553576959050951784592644178144")//parse(BigInt,"52550888012671962193116521992300249584518949945887203425"))
# bhat6 =T(parse(BigInt,"101855668513773837712956593596043266148479443244790887636953159551191054134940671472736229702711787350735239179")//parse(BigInt,"504322587935299170723833764883183242017770187561624249681119708768991642691172146267201689787026963930014131200"))
# bhat7 =T(parse(BigInt,"179578338747395946570172802104016572846366090083599")//parse(BigInt,"31203472487100067827342625012481692038011546889360"))
# bhat8 =T(-parse(BigInt,"500374162579884236288722085953024481890963958534161489781")//parse(BigInt,"5844265593286568782203740985670443078965284282201448700"))
# bhat10=T(80)
btilde1 =T(parse(BigInt,"-11350400930890172457349074817136051")//parse(BigInt,"44040760592150526274124650829734824960"))
btilde4 =T(parse(BigInt,"18872409140206580874590465524732661000311743892579167244576")//parse(BigInt,"20448796992082885248862284273169726631726393791864145954479875"))
btilde5 =T(parse(BigInt,"-4274515681501734477669162831906773100582117137555409033632")//parse(BigInt,"1345075012609680653640946124740919754948818869831613563131825"))
btilde6 =T(parse(BigInt,"151982138295746861476872192192436808638630079892291260212523545728864842939698890632387350810275352451114165347163409431707619557")//parse(BigInt,"12555745513304252217385714780506564339651078396200531374828064369671379225390708274915130315478249074580193825576828920435217536000"))
btilde7 =T(parse(BigInt,"-6107634561545846083950679043550120057398294081957207")//parse(BigInt,"2257051176566904906177783209236175724082835224997040"))
btilde8 =T(parse(BigInt,"1908954947067632130235683120094494845563199696277664164743")//parse(BigInt,"42857947684101504402827433894916582579078751402810623800"))
btilde9 =T(-33050288141543277444692395096256051//271248590133163812341791503489000)
btilde10=T(80)
TanYam7ConstantCache(c1,c2,c3,c4,c5,c6,c7,a21,a31,a32,a41,a43,a51,a53,a54,a61,a63,a64,a65,a71,a73,a74,a75,a76,a81,a83,a84,a85,a86,a87,a91,a93,a94,a95,a96,a97,a98,a101,a103,a104,a105,a106,a107,a108,b1,b4,b5,b6,b7,b8,b9,btilde1,btilde4,btilde5,btilde6,btilde7,btilde8,btilde9,btilde10)
end
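# The two methods above are selected by the solver's element types, e.g. (a
# commented sketch):
# TanYam7ConstantCache(Float64, Float64)    # fast literal tableau (CompiledFloats)
# TanYam7ConstantCache(BigFloat, BigFloat)  # exact rational tableau, converted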
struct TsitPap8ConstantCache{T,T2} <: OrdinaryDiffEqConstantCache
c1::T2
c2::T2
c3::T2
c4::T2
c5::T2
c6::T2
c7::T2
c8::T2
c9::T2
c10::T2
a0201::T
a0301::T
a0302::T
a0401::T
a0403::T
a0501::T
a0503::T
a0504::T
a0601::T
a0604::T
a0605::T
a0701::T
a0704::T
a0705::T
a0706::T
a0801::T
a0804::T
a0805::T
a0806::T
a0807::T
a0901::T
a0904::T
a0905::T
a0906::T
a0907::T
a0908::T
a1001::T
a1004::T
a1005::T
a1006::T
a1007::T
a1008::T
a1009::T
a1101::T
a1104::T
a1105::T
a1106::T
a1107::T
a1108::T
a1109::T
a1110::T
a1201::T
a1204::T
a1205::T
a1206::T
a1207::T
a1208::T
a1209::T
a1210::T
a1211::T
a1301::T
a1304::T
a1305::T
a1306::T
a1307::T
a1308::T
a1309::T
a1310::T
b1::T
b6::T
b7::T
b8::T
b9::T
b10::T
b11::T
b12::T
btilde1::T
btilde6::T
btilde7::T
btilde8::T
btilde9::T
btilde10::T
btilde11::T
btilde12::T
btilde13::T
end
"""
Cheap Error Estimation for Runge-Kutta methods, by Ch. Tsitouras and S.N. Papakostas,
Siam Journal on Scientific Computing, Vol. 20, Issue 6, Nov 1999.
"""
function TsitPap8ConstantCache(::Type{T},::Type{T2}) where {T<:CompiledFloats,T2<:CompiledFloats}
c1 =T2(0.06338028169014084)
c2 =T2(0.1027879458763643)
c3 =T2(0.15418191881454646)
c4 =T2(0.3875968992248062)
c5 =T2(0.4657534246575342)
c6 =T2(0.1554054054054054)
c7 =T2(1.0070921985815602)
c8 =T2(0.876141078561489)
c9 =T2(0.9120879120879121)
c10 =T2(0.959731543624161)
a0201 =T(0.06338028169014084)
a0301 =T(0.019438980427336498)
a0302 =T(0.08334896544902781)
a0401 =T(0.038545479703636615)
a0403 =T(0.11563643911090984)
a0501 =T(0.39436557770112496)
a0503 =T(-1.4818719321673373)
a0504 =T(1.4751032536910185)
a0601 =T(0.045994489107698204)
a0604 =T(0.23235070626395474)
a0605 =T(0.18740822928588133)
a0701 =T(0.06005228953244051)
a0704 =T(0.11220383194636775)
a0705 =T(-0.03357232951906142)
a0706 =T(0.016721613445658576)
a0801 =T(-1.5733292732086857)
a0804 =T(-1.3167087730223663)
a0805 =T(-11.723515296181773)
a0806 =T(9.107825028173872)
a0807 =T(6.512820512820513)
a0901 =T(-0.48107625624391254)
a0904 =T(-6.6506103607463904)
a0905 =T(-4.530206099782572)
a0906 =T(3.894414525020157)
a0907 =T(8.634217645525526)
a0908 =T(0.009401624788681498)
a1001 =T(-0.7754121446230569)
a1004 =T(-7.996604718235832)
a1005 =T(-6.726558607230182)
a1006 =T(5.532184454327406)
a1007 =T(10.89757332024991)
a1008 =T(0.020091650280045396)
a1009 =T(-0.039186042680376856)
a1101 =T(-1.1896363245449992)
a1104 =T(-7.128368483301214)
a1105 =T(-9.53722789710108)
a1106 =T(7.574470108980868)
a1107 =T(11.267486382070919)
a1108 =T(0.051009801223058315)
a1109 =T(0.08019413469508256)
a1110 =T(-0.15819617839847347)
a1201 =T(-0.39200039047127266)
a1204 =T(3.916659042493856)
a1205 =T(-2.8017459289080557)
a1206 =T(2.441204566481742)
a1207 =T(-2.4183655778824718)
a1208 =T(-0.33943326290032927)
a1209 =T(0.19496450383103364)
a1210 =T(-0.19437176762508154)
a1211 =T(0.5930888149805791)
a1301 =T(-1.4847063081291894)
a1304 =T(-2.390723588981498)
a1305 =T(-11.184306772840532)
a1306 =T(8.720804667459817)
a1307 =T(7.33673830753461)
a1308 =T(0.01289874999394761)
a1309 =T(0.042583289842657704)
a1310 =T(-0.05328834487981156)
b1 =T(0.04441161093250152)
b6 =T(0.35395063113733116)
b7 =T(0.2485219684184965)
b8 =T(-0.3326913171720666)
b9 =T(1.921248828652836)
b10 =T(-2.7317783000882523)
b11 =T(1.4012004409899175)
b12 =T(0.0951361371292365)
# bhat1 =T(0.044484201850329544)
# bhat6 =T(0.35502352274458154)
# bhat7 =T(0.24825530158391707)
# bhat8 =T(-2.4242252962684616)
# bhat9 =T(1.5999301534099692)
# bhat10=T(-1.8107646286929684)
# bhat13=T(2.9872967453726327)
btilde1 =T(-7.259091782802626e-5)
btilde6 =T(-0.0010728916072503584)
btilde7 =T(0.0002666668345794398)
btilde8 =T(2.091533979096395)
btilde9 =T(0.3213186752428666)
btilde10=T(-0.921013671395284)
btilde11=T(1.4012004409899175)
btilde12=T(0.0951361371292365)
btilde13=T(-2.9872967453726327)
TsitPap8ConstantCache(c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,a0201,a0301,a0302,a0401,a0403,a0501,a0503,a0504,a0601,a0604,a0605,a0701,a0704,a0705,a0706,a0801,a0804,a0805,a0806,a0807,a0901,a0904,a0905,a0906,a0907,a0908,a1001,a1004,a1005,a1006,a1007,a1008,a1009,a1101,a1104,a1105,a1106,a1107,a1108,a1109,a1110,a1201,a1204,a1205,a1206,a1207,a1208,a1209,a1210,a1211,a1301,a1304,a1305,a1306,a1307,a1308,a1309,a1310,b1,b6,b7,b8,b9,b10,b11,b12,btilde1,btilde6,btilde7,btilde8,btilde9,btilde10,btilde11,btilde12,btilde13)
end
"""
Cheap Error Estimation for Runge-Kutta methods, by Ch. Tsitouras and S.N. Papakostas,
Siam Journal on Scientific Computing, Vol. 20, Issue 6, Nov 1999.
"""
function TsitPap8ConstantCache(T::Type,T2::Type)
c1 =T2(9//142)
c2 =T2(24514//238491)
c3 =T2(12257//79497)
c4 =T2(50//129)
c5 =T2(34//73)
c6 =T2(23//148)
c7 =T2(142//141)
c8 =T2(29104120198//33218531707)
c9 =T2(83//91)
c10 =T2(143//149)
a0201 =T(9//142)
a0301 =T(9950845450//511901613729)
a0302 =T(42666469916//511901613729)
a0401 =T(12257//317988)
a0403 =T(12257//105996)
a0501 =T(14131686489425//35833975601529)
a0503 =T(-17700454220625//11944658533843)
a0504 =T(17619604667500//11944658533843)
a0601 =T(4418011//96055225)
a0604 =T(9757757988//41995818067)
a0605 =T(19921512441//106300094275)
a0701 =T(480285555889619//7997789253816320)
a0704 =T(52162106556696460538643//464887033284472899365888)
a0705 =T(-16450769949384489//490009784398315520)
a0706 =T(864813381633887//51718297665732608)
a0801 =T(-7001048352088587137143//4449830351030385148800)
a0804 =T(-2522765548294599044197935933//1915963195493758447677901792)
a0805 =T(-318556182235222634647116091//27172411532484037214054400)
a0806 =T(1205563885850790193966807//132365727505912221222912)
a0807 =T(254//39)
a0901 =T(-parse(BigInt,"20629396399716689122801179264428539394462855874226604463554767070845753369")//parse(BigInt,"42881759662770155956657513470012114488076017981128105307375594963152353200"))
a0904 =T(-parse(BigInt,"3315443074343659404779422149387397712986453181141168247590906370819301077749322753")//parse(BigInt,"498517112641608872807821838566847987729514312398278633788185835348997643218553476"))
a0905 =T(-parse(BigInt,"273749409411654060286948141164828452109898379203526945684314474186724062841643")//parse(BigInt,"60427583951377552503967840653825589117167443021628367691773162913607984889600"))
a0906 =T(parse(BigInt,"16656372518874512738268060504309924900437672263609245028809229865738327731797537")//parse(BigInt,"4276990138533930522782076385771016009097627930550149628282203007913538394549504"))
a0907 =T(parse(BigInt,"42008080033354305590804322944084805264441066760038302359736803632")//parse(BigInt,"4865302423216534910074823287811605599629170295030631799935804001"))
a0908 =T(parse(BigInt,"668459780930716338000066627236927417191947396177093524377824")//parse(BigInt,"71100452948884643779799087713322002499799983434685642170251833"))
a1001 =T(-parse(BigInt,"1793603946322260900828212460706877142477132870159527")//parse(BigInt,"2313097568511990753781649719556084665131024900300800"))
a1004 =T(-parse(BigInt,"14776874123722838192315406145167687512425345723701")//parse(BigInt,"1847893530366076701102014146927696206329050105856"))
a1005 =T(-parse(BigInt,"19587020919884661714856757105130246995757906603")//parse(BigInt,"2911893296942532038566182868170393629789388800"))
a1006 =T(parse(BigInt,"6364380863259071677112259236455506477417699780613300364807")//parse(BigInt,"1150428174584942133406579091549443438814988349188394057728"))
a1007 =T(parse(BigInt,"27725164402569748756040320433848245155581006369")//parse(BigInt,"2544159473655547770881695354241256106302348256"))
a1008 =T(parse(BigInt,"10744247163960019876833255044784609639")//parse(BigInt,"534761804739901348825491947768304503296"))
a1009 =T(-parse(BigInt,"50977737930792808232204417497248979399878217280011103197862899")//parse(BigInt,"1300915694564913675613280314081837358644964393191337994183389184"))
a1101 =T(-parse(BigInt,"3587625717068952487214493441966897048737050755812600710793")//parse(BigInt,"3015733164033229624772006086429467685046639983706974412800"))
a1104 =T(-parse(BigInt,"5453011711267804731211501837262816944661201619903")//parse(BigInt,"764973320899716397072448644710733101329290196992"))
a1105 =T(-parse(BigInt,"884348836774584715070440485633026464653487653")//parse(BigInt,"92725983515963799584249219499787659014963200"))
a1106 =T(parse(BigInt,"26823469063654084387375587616552322383082061411417182757389742951")//parse(BigInt,"3541299744763681675473620647087123057228744296123642301654630400"))
a1107 =T(parse(BigInt,"142363419491686507162007051071007722765323162710521029")//parse(BigInt,"12634887202368261565807449771335728082273587905775840"))
a1108 =T(parse(BigInt,"64747617454909275289531520412519442831235890581")//parse(BigInt,"1269317188117975960996670628974453443777854830080"))
a1109 =T(parse(BigInt,"112633808253272720979874303367503891597499261046700689572459050065039333987335667")//parse(BigInt,"1404514291245034532812181377119034501014039830518218465064579291611563374608650240"))
a1110 =T(-parse(BigInt,"10612202518573994431153697720606405883")//parse(BigInt,"67082546658259846778754594976831647575"))
a1201 =T(-parse(BigInt,"7534081165544982478296202335922049210803875045423")//parse(BigInt,"19219575665440848756074598051658416387002479820800"))
a1204 =T(parse(BigInt,"237696087452786717802270375283034262859273455")//parse(BigInt,"60688480889936839261131818793519097177034752"))
a1205 =T(-parse(BigInt,"20610578209826329263318986584876108069323")//parse(BigInt,"7356333776438834884293014575840932659200"))
a1206 =T(parse(BigInt,"51260471529841028040709654458903254781320136131844164563")//parse(BigInt,"20998023776318546907382302106788168158765514131585630208"))
a1207 =T(-parse(BigInt,"3077214437173472971196810795615384000211457151011")//parse(BigInt,"1272435592582280820597059432060116893200684680384"))
a1208 =T(-parse(BigInt,"1539218116260541896259682954580256454049")//parse(BigInt,"4534670830750360983422999946409310393344"))
a1209 =T(parse(BigInt,"241886539350268429372116296787271276553970618941104594460614948326132797451456131")//parse(BigInt,"1240669632662465892528916120041123051054026283188324780101555345343662316090597376"))
a1210 =T(-80556486832245966191717452425924975//414445409518676597565032008051106461)
a1211 =T(parse(BigInt,"2944781680874500347594142792814463350")//parse(BigInt,"4965161383073676983610218096030654529"))
a1301 =T(-parse(BigInt,"7757739937862944832927743694336116203639371542761")//parse(BigInt,"5225100678421794325654850845451340473577260032000"))
a1304 =T(-parse(BigInt,"433889546009521405913741133329446636837810749")//parse(BigInt,"181488796115643001621310691169322233346938880"))
a1305 =T(-parse(BigInt,"246044720162308748108107126829066792329071")//parse(BigInt,"21999103311417807170100413255475150848000"))
a1306 =T(parse(BigInt,"2140331235425829844389060818616719848637810765257179167")//parse(BigInt,"245428182036011897638028252815369258646052549878743040"))
a1307 =T(parse(BigInt,"1573990926219809229258666534611598771063240529")//parse(BigInt,"214535514317495538101650508596881057760818080"))
a1308 =T(parse(BigInt,"62408280667309375445301959066100433563")//parse(BigInt,"4838320046251983849512355559327451645440"))
a1309 =T(parse(BigInt,"1145609822249493677618113725359506642998153205603226883141207089968379")//parse(BigInt,"26902802166822768476367427840835743139559294105398677975568538466119680"))
a1310 =T(-408950356875874683139089678053832//7674292714443204070455739595109785)
b1 =T(55038446513529253801//1239280570055853383520)
b6 =T(2335496795323782464411846394611//6598368783294020109895379936256)
b7 =T(7636073376527143565375240869888//30725949199261296642046754748645)
b8 =T(-4237087214169934312729607487//12735791394214625116604076160)
b9 =T(parse(BigInt,"408505291291133241760995514121984335914363927884426780078325258228227984174126699")//parse(BigInt,"212624874612193697466655159405635202123821166475348052734199905546455860773575680"))
b10 =T(-1108225296327029096435947//405679075893979729103310)
b11 =T(2460988291206213825688467985//1756342789520947764222671739)
b12 =T(4808707937311//50545545388065)
# bhat1 =T(715953338020208413//16094552857868225760)
# bhat6 =T(62284335162928966987066121//175437206755843240272669696)
# bhat7 =T(184309146777472302831695872//742417767522160213324368465)
# bhat8 =T(-509771598215811385123057257//210282269969085698264101760)
# bhat9 =T(parse(BigInt,"2701602489646143640362891402924962500766379885231830470264478480621389")//parse(BigInt,"1688575269294196295712420798364925687791337062814161130291144634516480"))
# bhat10=T(-7218534073012286740367561//3986456306153224954098780)
# bhat13=T(5752173075461//1925544586212)
btilde1 =T(-1124506425334925//15491007125698167294)
btilde6 =T(-34035261967014004512968665//31722926842759712066804711232)
btilde7 =T(24580774837247048845194838016//92177847597783889926140264245935)
btilde8 =T(7658235379858959628545472335//3661540025836704721023671896)
btilde9 =T(parse(BigInt,"1510930663253486646384296195586886863633653147150681174690536256975353069486325")//parse(BigInt,"4702280880846591386281796794547701585430660412435581935467882526508158459415616"))
btilde10=T(-237184116991804346989794715//257525077377498332034781188)
btilde11=T(2460988291206213825688467985//1756342789520947764222671739)
btilde12=T(4808707937311//50545545388065)
btilde13=T(-5752173075461//1925544586212)
TsitPap8ConstantCache(c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,a0201,a0301,a0302,a0401,a0403,a0501,a0503,a0504,a0601,a0604,a0605,a0701,a0704,a0705,a0706,a0801,a0804,a0805,a0806,a0807,a0901,a0904,a0905,a0906,a0907,a0908,a1001,a1004,a1005,a1006,a1007,a1008,a1009,a1101,a1104,a1105,a1106,a1107,a1108,a1109,a1110,a1201,a1204,a1205,a1206,a1207,a1208,a1209,a1210,a1211,a1301,a1304,a1305,a1306,a1307,a1308,a1309,a1310,b1,b6,b7,b8,b9,b10,b11,b12,btilde1,btilde6,btilde7,btilde8,btilde9,btilde10,btilde11,btilde12,btilde13)
end
struct DP8ConstantCache{T,T2} <: OrdinaryDiffEqConstantCache
c7::T2
c8::T2
c9::T2
c10::T2
c11::T2
c6::T2
c5::T2
c4::T2
c3::T2
c2::T2
b1::T
b6::T
b7::T
b8::T
b9::T
b10::T
b11::T
b12::T
btilde1::T
btilde6::T
btilde7::T
btilde8::T
btilde9::T
btilde10::T
btilde11::T
btilde12::T
er1::T
er6::T
er7::T
er8::T
er9::T
er10::T
er11::T
er12::T
a0201::T
a0301::T
a0302::T
a0401::T
a0403::T
a0501::T
a0503::T
a0504::T
a0601::T
a0604::T
a0605::T
a0701::T
a0704::T
a0705::T
a0706::T
a0801::T
a0804::T
a0805::T
a0806::T
a0807::T
a0901::T
a0904::T
a0905::T
a0906::T
a0907::T
a0908::T
a1001::T
a1004::T
a1005::T
a1006::T
a1007::T
a1008::T
a1009::T
a1101::T
a1104::T
a1105::T
a1106::T
a1107::T
a1108::T
a1109::T
a1110::T
a1201::T
a1204::T
a1205::T
a1206::T
a1207::T
a1208::T
a1209::T
a1210::T
a1211::T
c14::T2
c15::T2
c16::T2
a1401::T
a1407::T
a1408::T
a1409::T
a1410::T
a1411::T
a1412::T
a1413::T
a1501::T
a1506::T
a1507::T
a1508::T
a1511::T
a1512::T
a1513::T
a1514::T
a1601::T
a1606::T
a1607::T
a1608::T
a1609::T
a1613::T
a1614::T
a1615::T
d401::T
d406::T
d407::T
d408::T
d409::T
d410::T
d411::T
d412::T
d413::T
d414::T
d415::T
d416::T
d501::T
d506::T
d507::T
d508::T
d509::T
d510::T
d511::T
d512::T
d513::T
d514::T
d515::T
d516::T
d601::T
d606::T
d607::T
d608::T
d609::T
d610::T
d611::T
d612::T
d613::T
d614::T
d615::T
d616::T
d701::T
d706::T
d707::T
d708::T
d709::T
d710::T
d711::T
d712::T
d713::T
d714::T
d715::T
d716::T
end
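# Field naming follows the usual Butcher-tableau convention: c* are stage times,
# a* the stage coupling coefficients, b* the solution weights, btilde* the embedded
# error weights and er* the error-estimator weights; c14-c16, a14**-a16** and the
# d*** entries are the extra stages and polynomial weights used for dense output
# (filled in by DP8Interp and DP8Interp_polyweights below).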
function DP8ConstantCache(::Type{T},::Type{T2}) where {T<:CompiledFloats,T2<:CompiledFloats}
c7 = T2(0.25)
c8 = T2(0.3076923076923077)
c9 = T2(0.6512820512820513)
c10 = T2(0.6)
c11 = T2(0.8571428571428571)
c6 = T2(0.3333333333333333)
c5 = T2(0.2816496580927726)
c4 = T2(0.1183503419072274)
c3 = T2(0.0789002279381516)
c2 = T2(0.05260015195876773)
b1 = T(0.054293734116568765)
b6 = T(4.450312892752409)
b7 = T(1.8915178993145003)
b8 = T(-5.801203960010585)
b9 = T(0.3111643669578199)
b10 = T(-0.1521609496625161)
b11 = T(0.20136540080403034)
b12 = T(0.04471061572777259)
# bhh1 = T(0.2440944881889764)
# bhh2 = T(0.7338466882816118)
# bhh3 = T(0.022058823529411766)
btilde1 = T(-0.18980075407240762)
btilde6 = T(4.450312892752409)
btilde7 = T(1.8915178993145003)
btilde8 = T(-5.801203960010585)
btilde9 = T(-0.42268232132379197)
btilde10 = T(-0.1521609496625161)
btilde11 = T(0.20136540080403034)
btilde12 = T(0.022651792198360825)
er1 = T(0.01312004499419488)
er6 = T(-1.2251564463762044)
er7 = T(-0.4957589496572502)
er8 = T(1.6643771824549864)
er9 = T(-0.35032884874997366)
er10 = T(0.3341791187130175)
er11 = T(0.08192320648511571)
er12 = T(-0.022355307863886294)
a0201 = T(0.05260015195876773)
a0301 = T(0.0197250569845379)
a0302 = T(0.0591751709536137)
a0401 = T(0.02958758547680685)
a0403 = T(0.08876275643042054)
a0501 = T(0.2413651341592667)
a0503 = T(-0.8845494793282861)
a0504 = T(0.924834003261792)
a0601 = T(0.037037037037037035)
a0604 = T(0.17082860872947386)
a0605 = T(0.12546768756682242)
a0701 = T(0.037109375)
a0704 = T(0.17025221101954405)
a0705 = T(0.06021653898045596)
a0706 = T(-0.017578125)
a0801 = T(0.03709200011850479)
a0804 = T(0.17038392571223998)
a0805 = T(0.10726203044637328)
a0806 = T(-0.015319437748624402)
a0807 = T(0.008273789163814023)
a0901 = T(0.6241109587160757)
a0904 = T(-3.3608926294469414)
a0905 = T(-0.868219346841726)
a0906 = T(27.59209969944671)
a0907 = T(20.154067550477894)
a0908 = T(-43.48988418106996)
a1001 = T(0.47766253643826434)
a1004 = T(-2.4881146199716677)
a1005 = T(-0.590290826836843)
a1006 = T(21.230051448181193)
a1007 = T(15.279233632882423)
a1008 = T(-33.28821096898486)
a1009 = T(-0.020331201708508627)
a1101 = T(-0.9371424300859873)
a1104 = T(5.186372428844064)
a1105 = T(1.0914373489967295)
a1106 = T(-8.149787010746927)
a1107 = T(-18.52006565999696)
a1108 = T(22.739487099350505)
a1109 = T(2.4936055526796523)
a1110 = T(-3.0467644718982196)
a1201 = T(2.273310147516538)
a1204 = T(-10.53449546673725)
a1205 = T(-2.0008720582248625)
a1206 = T(-17.9589318631188)
a1207 = T(27.94888452941996)
a1208 = T(-2.8589982771350235)
a1209 = T(-8.87285693353063)
a1210 = T(12.360567175794303)
a1211 = T(0.6433927460157636)
c14,c15,c16,a1401,a1407,a1408,a1409,a1410,a1411,a1412,a1413,a1501,a1506,a1507,a1508,a1511,a1512,a1513,a1514,a1601,a1606,a1607,a1608,a1609,a1613,a1614,a1615 = DP8Interp(T,T2)
d401,d406,d407,d408,d409,d410,d411,d412,d413,d414,d415,d416,d501,d506,d507,d508,d509,d510,d511,d512,d513,d514,d515,d516,d601,d606,d607,d608,d609,d610,d611,d612,d613,d614,d615,d616,d701,d706,d707,d708,d709,d710,d711,d712,d713,d714,d715,d716 = DP8Interp_polyweights(T)
DP8ConstantCache(c7,c8,c9,c10,c11,c6,c5,c4,c3,c2,b1,b6,b7,b8,b9,b10,b11,b12,btilde1,btilde6,btilde7,btilde8,btilde9,btilde10,btilde11,btilde12,er1,er6,er7,er8,er9,er10,er11,er12,a0201,a0301,a0302,a0401,a0403,a0501,a0503,a0504,a0601,a0604,a0605,a0701,a0704,a0705,a0706,a0801,a0804,a0805,a0806,a0807,a0901,a0904,a0905,a0906,a0907,a0908,a1001,a1004,a1005,a1006,a1007,a1008,a1009,a1101,a1104,a1105,a1106,a1107,a1108,a1109,a1110,a1201,a1204,a1205,a1206,a1207,a1208,a1209,a1210,a1211,c14,c15,c16,a1401,a1407,a1408,a1409,a1410,a1411,a1412,a1413,a1501,a1506,a1507,a1508,a1511,a1512,a1513,a1514,a1601,a1606,a1607,a1608,a1609,a1613,a1614,a1615,d401,d406,d407,d408,d409,d410,d411,d412,d413,d414,d415,d416,d501,d506,d507,d508,d509,d510,d511,d512,d513,d514,d515,d516,d601,d606,d607,d608,d609,d610,d611,d612,d613,d614,d615,d616,d701,d706,d707,d708,d709,d710,d711,d712,d713,d714,d715,d716)
end
function DP8ConstantCache(T::Type,T2::Type)
c7 = T2(1//4)
c8 = T2(4//13)
c9 = T2(127//195)
c10 = T2(3//5)
c11 = T2(6//7)
c6 = T2(4//3 * c7)
c5 = T2((6+sqrt(6))/10 * c6)
c4 = T2((6-sqrt(6))/10 * c6)
c3 = T2(2//3 * c4)
c2 = T2(2//3 * c3)
b1 = T(parse(BigFloat," 5.42937341165687622380535766363e-2"))
b6 = T(parse(BigFloat," 4.45031289275240888144113950566"))
b7 = T(parse(BigFloat," 1.89151789931450038304281599044"))
b8 = T(parse(BigFloat,"-5.8012039600105847814672114227"))
b9 = T(parse(BigFloat," 3.1116436695781989440891606237e-1"))
b10 = T(parse(BigFloat,"-1.52160949662516078556178806805e-1"))
b11 = T(parse(BigFloat," 2.01365400804030348374776537501e-1"))
b12 = T(parse(BigFloat," 4.47106157277725905176885569043e-2"))
# bhh1 = T(parse(BigFloat,"0.244094488188976377952755905512"))
# bhh2 = T(parse(BigFloat,"0.733846688281611857341361741547"))
# bhh3 = T(parse(BigFloat,"0.220588235294117647058823529412e-01"))
btilde1 = T(parse(BigFloat,"-1.898007540724076157147023288757e-1"))
btilde6 = T(parse(BigFloat," 4.45031289275240888144113950566"))
btilde7 = T(parse(BigFloat," 1.89151789931450038304281599044"))
btilde8 = T(parse(BigFloat,"-5.8012039600105847814672114227"))
btilde9 = T(parse(BigFloat,"-4.22682321323791962932445679177e-1"))
btilde10 = T(parse(BigFloat,"-1.52160949662516078556178806805e-1"))
btilde11 = T(parse(BigFloat," 2.01365400804030348374776537501e-1"))
btilde12 = T(parse(BigFloat,"2.26517921983608258118062039631e-2"))
er1 = T(parse(BigFloat," 0.1312004499419488073250102996e-01"))
er6 = T(parse(BigFloat,"-0.1225156446376204440720569753e+01"))
er7 = T(parse(BigFloat,"-0.4957589496572501915214079952"))
er8 = T(parse(BigFloat," 0.1664377182454986536961530415e+01"))
er9 = T(parse(BigFloat,"-0.3503288487499736816886487290"))
er10 = T(parse(BigFloat," 0.3341791187130174790297318841"))
er11 = T(parse(BigFloat," 0.8192320648511571246570742613e-01"))
er12 = T(parse(BigFloat,"-0.2235530786388629525884427845e-01"))
a0201 = T(parse(BigFloat," 5.26001519587677318785587544488e-2"))
a0301 = T(parse(BigFloat," 1.97250569845378994544595329183e-2"))
a0302 = T(parse(BigFloat," 5.91751709536136983633785987549e-2"))
a0401 = T(parse(BigFloat," 2.95875854768068491816892993775e-2"))
a0403 = T(parse(BigFloat," 8.87627564304205475450678981324e-2"))
a0501 = T(parse(BigFloat," 2.41365134159266685502369798665e-1"))
a0503 = T(parse(BigFloat,"-8.84549479328286085344864962717e-1"))
a0504 = T(parse(BigFloat," 9.24834003261792003115737966543e-1"))
a0601 = T(parse(BigFloat," 3.7037037037037037037037037037e-2"))
a0604 = T(parse(BigFloat," 1.70828608729473871279604482173e-1"))
a0605 = T(parse(BigFloat," 1.25467687566822425016691814123e-1"))
a0701 = T(parse(BigFloat," 3.7109375e-2"))
a0704 = T(parse(BigFloat," 1.70252211019544039314978060272e-1"))
a0705 = T(parse(BigFloat," 6.02165389804559606850219397283e-2"))
a0706 = T(parse(BigFloat,"-1.7578125e-2"))
a0801 = T(parse(BigFloat," 3.70920001185047927108779319836e-2"))
a0804 = T(parse(BigFloat," 1.70383925712239993810214054705e-1"))
a0805 = T(parse(BigFloat," 1.07262030446373284651809199168e-1"))
a0806 = T(parse(BigFloat,"-1.53194377486244017527936158236e-2"))
a0807 = T(parse(BigFloat," 8.27378916381402288758473766002e-3"))
a0901 = T(parse(BigFloat," 6.24110958716075717114429577812e-1"))
a0904 = T(parse(BigFloat,"-3.36089262944694129406857109825"))
a0905 = T(parse(BigFloat,"-8.68219346841726006818189891453e-1"))
a0906 = T(parse(BigFloat," 2.75920996994467083049415600797e1"))
a0907 = T(parse(BigFloat," 2.01540675504778934086186788979e1"))
a0908 = T(parse(BigFloat,"-4.34898841810699588477366255144e1"))
a1001 = T(parse(BigFloat," 4.77662536438264365890433908527e-1"))
a1004 = T(parse(BigFloat,"-2.48811461997166764192642586468e0"))
a1005 = T(parse(BigFloat,"-5.90290826836842996371446475743e-1"))
a1006 = T(parse(BigFloat," 2.12300514481811942347288949897e1"))
a1007 = T(parse(BigFloat," 1.52792336328824235832596922938e1"))
a1008 = T(parse(BigFloat,"-3.32882109689848629194453265587e1"))
a1009 = T(parse(BigFloat,"-2.03312017085086261358222928593e-2"))
a1101 = T(parse(BigFloat,"-9.3714243008598732571704021658e-1"))
a1104 = T(parse(BigFloat," 5.18637242884406370830023853209e0"))
a1105 = T(parse(BigFloat," 1.09143734899672957818500254654e0"))
a1106 = T(parse(BigFloat,"-8.14978701074692612513997267357e0"))
a1107 = T(parse(BigFloat,"-1.85200656599969598641566180701e1"))
a1108 = T(parse(BigFloat," 2.27394870993505042818970056734e1"))
a1109 = T(parse(BigFloat," 2.49360555267965238987089396762e0"))
a1110 = T(parse(BigFloat,"-3.0467644718982195003823669022e0"))
a1201 = T(parse(BigFloat," 2.27331014751653820792359768449e0"))
a1204 = T(parse(BigFloat," -1.05344954667372501984066689879e1"))
a1205 = T(parse(BigFloat," -2.00087205822486249909675718444e0"))
a1206 = T(parse(BigFloat," -1.79589318631187989172765950534e1"))
a1207 = T(parse(BigFloat," 2.79488845294199600508499808837e1"))
a1208 = T(parse(BigFloat," -2.85899827713502369474065508674e0"))
a1209 = T(parse(BigFloat," -8.87285693353062954433549289258e0"))
a1210 = T(parse(BigFloat," 1.23605671757943030647266201528e1"))
a1211 = T(parse(BigFloat," 6.43392746015763530355970484046e-1"))
c14,c15,c16,a1401,a1407,a1408,a1409,a1410,a1411,a1412,a1413,a1501,a1506,a1507,a1508,a1511,a1512,a1513,a1514,a1601,a1606,a1607,a1608,a1609,a1613,a1614,a1615 = DP8Interp(T,T2)
d401,d406,d407,d408,d409,d410,d411,d412,d413,d414,d415,d416,d501,d506,d507,d508,d509,d510,d511,d512,d513,d514,d515,d516,d601,d606,d607,d608,d609,d610,d611,d612,d613,d614,d615,d616,d701,d706,d707,d708,d709,d710,d711,d712,d713,d714,d715,d716 = DP8Interp_polyweights(T)
DP8ConstantCache(c7,c8,c9,c10,c11,c6,c5,c4,c3,c2,b1,b6,b7,b8,b9,b10,b11,b12,btilde1,btilde6,btilde7,btilde8,btilde9,btilde10,btilde11,btilde12,er1,er6,er7,er8,er9,er10,er11,er12,a0201,a0301,a0302,a0401,a0403,a0501,a0503,a0504,a0601,a0604,a0605,a0701,a0704,a0705,a0706,a0801,a0804,a0805,a0806,a0807,a0901,a0904,a0905,a0906,a0907,a0908,a1001,a1004,a1005,a1006,a1007,a1008,a1009,a1101,a1104,a1105,a1106,a1107,a1108,a1109,a1110,a1201,a1204,a1205,a1206,a1207,a1208,a1209,a1210,a1211,c14,c15,c16,a1401,a1407,a1408,a1409,a1410,a1411,a1412,a1413,a1501,a1506,a1507,a1508,a1511,a1512,a1513,a1514,a1601,a1606,a1607,a1608,a1609,a1613,a1614,a1615,d401,d406,d407,d408,d409,d410,d411,d412,d413,d414,d415,d416,d501,d506,d507,d508,d509,d510,d511,d512,d513,d514,d515,d516,d601,d606,d607,d608,d609,d610,d611,d612,d613,d614,d615,d616,d701,d706,d707,d708,d709,d710,d711,d712,d713,d714,d715,d716)
end
function DP8Interp(::Type{T},::Type{T2}) where {T<:CompiledFloats,T2<:CompiledFloats}
c14 = T2(0.1)
c15 = T2(0.2)
c16 = T2(0.7777777777777778)
a1401 = T(0.056167502283047954)
a1407 = T(0.25350021021662483)
a1408 = T(-0.2462390374708025)
a1409 = T(-0.12419142326381637)
a1410 = T(0.15329179827876568)
a1411 = T(0.00820105229563469)
a1412 = T(0.007567897660545699)
a1413 = T(-0.008298)
a1501 = T(0.03183464816350214)
a1506 = T(0.028300909672366776)
a1507 = T(0.053541988307438566)
a1508 = T(-0.05492374857139099)
a1511 = T(-0.00010834732869724932)
a1512 = T(0.0003825710908356584)
a1513 = T(-0.00034046500868740456)
a1514 = T(0.1413124436746325)
a1601 = T(-0.42889630158379194)
a1606 = T(-4.697621415361164)
a1607 = T(7.683421196062599)
a1608 = T(4.06898981839711)
a1609 = T(0.3567271874552811)
a1613 = T(-0.0013990241651590145)
a1614 = T(2.9475147891527724)
a1615 = T(-9.15095847217987)
return c14,c15,c16,a1401,a1407,a1408,a1409,a1410,a1411,a1412,a1413,a1501,a1506,a1507,a1508,a1511,a1512,a1513,a1514,a1601,a1606,a1607,a1608,a1609,a1613,a1614,a1615
end
function DP8Interp(T::Type,T2::Type)
c14 = T2(1//10)
c15 = T2(2//10)
c16 = T2(7//9)
a1401 = T(parse(BigFloat," 5.61675022830479523392909219681e-2"))
a1407 = T(parse(BigFloat," 2.53500210216624811088794765333e-1"))
a1408 = T(parse(BigFloat,"-2.46239037470802489917441475441e-1"))
a1409 = T(parse(BigFloat,"-1.24191423263816360469010140626e-1"))
a1410 = T(parse(BigFloat," 1.5329179827876569731206322685e-1"))
a1411 = T(parse(BigFloat," 8.20105229563468988491666602057e-3"))
a1412 = T(parse(BigFloat," 7.56789766054569976138603589584e-3"))
a1413 = T(parse(BigFloat,"-8.298e-3"))
a1501 = T(parse(BigFloat," 3.18346481635021405060768473261e-2"))
a1506 = T(parse(BigFloat," 2.83009096723667755288322961402e-2"))
a1507 = T(parse(BigFloat," 5.35419883074385676223797384372e-2"))
a1508 = T(parse(BigFloat,"-5.49237485713909884646569340306e-2"))
a1511 = T(parse(BigFloat,"-1.08347328697249322858509316994e-4"))
a1512 = T(parse(BigFloat," 3.82571090835658412954920192323e-4"))
a1513 = T(parse(BigFloat,"-3.40465008687404560802977114492e-4"))
a1514 = T(parse(BigFloat," 1.41312443674632500278074618366e-1"))
a1601 = T(parse(BigFloat,"-4.28896301583791923408573538692e-1"))
a1606 = T(parse(BigFloat,"-4.69762141536116384314449447206e0"))
a1607 = T(parse(BigFloat," 7.68342119606259904184240953878e0"))
a1608 = T(parse(BigFloat," 4.06898981839711007970213554331e0"))
a1609 = T(parse(BigFloat," 3.56727187455281109270669543021e-1"))
a1613 = T(parse(BigFloat,"-1.39902416515901462129418009734e-3"))
a1614 = T(parse(BigFloat," 2.9475147891527723389556272149e0"))
a1615 = T(parse(BigFloat,"-9.15095847217987001081870187138e0"))
return c14,c15,c16,a1401,a1407,a1408,a1409,a1410,a1411,a1412,a1413,a1501,a1506,a1507,a1508,a1511,a1512,a1513,a1514,a1601,a1606,a1607,a1608,a1609,a1613,a1614,a1615
end
function DP8Interp_polyweights(::Type{T}) where T<:CompiledFloats
d401 = T(-8.428938276109013)
d406 = T(0.5667149535193777)
d407 = T(-3.0689499459498917)
d408 = T(2.38466765651207)
d409 = T(2.117034582445028)
d410 = T(-0.871391583777973)
d411 = T(2.2404374302607883)
d412 = T(0.6315787787694688)
d413 = T(-0.08899033645133331)
d414 = T(18.148505520854727)
d415 = T(-9.194632392478356)
d416 = T(-4.436036387594894)
d501 = T(10.427508642579134)
d506 = T(242.28349177525817)
d507 = T(165.20045171727028)
d508 = T(-374.5467547226902)
d509 = T(-22.113666853125306)
d510 = T(7.733432668472264)
d511 = T(-30.674084731089398)
d512 = T(-9.332130526430229)
d513 = T(15.697238121770845)
d514 = T(-31.139403219565178)
d515 = T(-9.35292435884448)
d516 = T(35.81684148639408)
d601 = T(19.985053242002433)
d606 = T(-387.0373087493518)
d607 = T(-189.17813819516758)
d608 = T(527.8081592054236)
d609 = T(-11.57390253995963)
d610 = T(6.8812326946963)
d611 = T(-1.0006050966910838)
d612 = T(0.7777137798053443)
d613 = T(-2.778205752353508)
d614 = T(-60.19669523126412)
d615 = T(84.32040550667716)
d616 = T(11.99229113618279)
d701 = T(-25.69393346270375)
d706 = T(-154.18974869023643)
d707 = T(-231.5293791760455)
d708 = T(357.6391179106141)
d709 = T(93.40532418362432)
d710 = T(-37.45832313645163)
d711 = T(104.0996495089623)
d712 = T(29.8402934266605)
d713 = T(-43.53345659001114)
d714 = T(96.32455395918828)
d715 = T(-39.17726167561544)
d716 = T(-149.72683625798564)
return d401,d406,d407,d408,d409,d410,d411,d412,d413,d414,d415,d416,d501,d506,d507,d508,d509,d510,d511,d512,d513,d514,d515,d516,d601,d606,d607,d608,d609,d610,d611,d612,d613,d614,d615,d616,d701,d706,d707,d708,d709,d710,d711,d712,d713,d714,d715,d716
end
function DP8Interp_polyweights(T::Type)
d401 = T(parse(BigFloat,"-0.84289382761090128651353491142e+01"))
d406 = T(parse(BigFloat," 0.56671495351937776962531783590e+00"))
d407 = T(parse(BigFloat,"-0.30689499459498916912797304727e+01"))
d408 = T(parse(BigFloat," 0.23846676565120698287728149680e+01"))
d409 = T(parse(BigFloat," 0.21170345824450282767155149946e+01"))
d410 = T(parse(BigFloat,"-0.87139158377797299206789907490e+00"))
d411 = T(parse(BigFloat," 0.22404374302607882758541771650e+01"))
d412 = T(parse(BigFloat," 0.63157877876946881815570249290e+00"))
d413 = T(parse(BigFloat,"-0.88990336451333310820698117400e-01"))
d414 = T(parse(BigFloat," 0.18148505520854727256656404962e+02"))
d415 = T(parse(BigFloat,"-0.91946323924783554000451984436e+01"))
d416 = T(parse(BigFloat,"-0.44360363875948939664310572000e+01"))
d501 = T(parse(BigFloat," 0.10427508642579134603413151009e+02"))
d506 = T(parse(BigFloat," 0.24228349177525818288430175319e+03"))
d507 = T(parse(BigFloat," 0.16520045171727028198505394887e+03"))
d508 = T(parse(BigFloat,"-0.37454675472269020279518312152e+03"))
d509 = T(parse(BigFloat,"-0.22113666853125306036270938578e+02"))
d510 = T(parse(BigFloat," 0.77334326684722638389603898808e+01"))
d511 = T(parse(BigFloat,"-0.30674084731089398182061213626e+02"))
d512 = T(parse(BigFloat,"-0.93321305264302278729567221706e+01"))
d513 = T(parse(BigFloat," 0.15697238121770843886131091075e+02"))
d514 = T(parse(BigFloat,"-0.31139403219565177677282850411e+02"))
d515 = T(parse(BigFloat,"-0.93529243588444783865713862664e+01"))
d516 = T(parse(BigFloat," 0.35816841486394083752465898540e+02"))
d601 = T(parse(BigFloat," 0.19985053242002433820987653617e+02"))
d606 = T(parse(BigFloat,"-0.38703730874935176555105901742e+03"))
d607 = T(parse(BigFloat,"-0.18917813819516756882830838328e+03"))
d608 = T(parse(BigFloat," 0.52780815920542364900561016686e+03"))
d609 = T(parse(BigFloat,"-0.11573902539959630126141871134e+02"))
d610 = T(parse(BigFloat," 0.68812326946963000169666922661e+01"))
d611 = T(parse(BigFloat,"-0.10006050966910838403183860980e+01"))
d612 = T(parse(BigFloat," 0.77771377980534432092869265740e+00"))
d613 = T(parse(BigFloat,"-0.27782057523535084065932004339e+01"))
d614 = T(parse(BigFloat,"-0.60196695231264120758267380846e+02"))
d615 = T(parse(BigFloat," 0.84320405506677161018159903784e+02"))
d616 = T(parse(BigFloat," 0.11992291136182789328035130030e+02"))
d701 = T(parse(BigFloat,"-0.25693933462703749003312586129e+02"))
d706 = T(parse(BigFloat,"-0.15418974869023643374053993627e+03"))
d707 = T(parse(BigFloat,"-0.23152937917604549567536039109e+03"))
d708 = T(parse(BigFloat," 0.35763911791061412378285349910e+03"))
d709 = T(parse(BigFloat," 0.93405324183624310003907691704e+02"))
d710 = T(parse(BigFloat,"-0.37458323136451633156875139351e+02"))
d711 = T(parse(BigFloat," 0.10409964950896230045147246184e+03"))
d712 = T(parse(BigFloat," 0.29840293426660503123344363579e+02"))
d713 = T(parse(BigFloat,"-0.43533456590011143754432175058e+02"))
d714 = T(parse(BigFloat," 0.96324553959188282948394950600e+02"))
d715 = T(parse(BigFloat,"-0.39177261675615439165231486172e+02"))
d716 = T(parse(BigFloat,"-0.14972683625798562581422125276e+03"))
return d401,d406,d407,d408,d409,d410,d411,d412,d413,d414,d415,d416,d501,d506,d507,d508,d509,d510,d511,d512,d513,d514,d515,d516,d601,d606,d607,d608,d609,d610,d611,d612,d613,d614,d615,d616,d701,d706,d707,d708,d709,d710,d711,d712,d713,d714,d715,d716
end
using JLD
using DifferentialEquations
using Plots
using Printf
using LinearAlgebra # needed for the identity I used to build Q and var_0 below
include("../../../../src/estimate/ct_filters/ct_kalman_simple.jl")
#include("../../../../src/estimate/kalman.jl")
#include("../../../../src/estimate/ct_filters/ct_kalman_filter_mdn.jl")
EX =2
# set up transition/meas eqs matrixes
if EX == 1
# AR(1) model
n_vars = 1
n_states = 1
n_shocks = 1
T = Array{Float64}(undef,n_states,n_states)
T .= -10.0
    R = Array{Float64}(undef,n_states,n_shocks)
    R .= 10.0 # fill the array rather than rebinding R to a scalar
Z = Array{Float64}(undef,n_vars,n_states)
Z .= 1.0
elseif EX == 2
# independent AR(1) models -- load only on the first one
n_vars = 1
n_states = 2
n_shocks = 1
    T = zeros(Float64,n_states,n_states) # zeros, not undef: the off-diagonal entries are used below
    T[1,1] = -10.0
    T[2,2] = -10.0
R = Array{Float64}(undef,n_states,n_shocks)
R .= 1.0
@show R
Z = Array{Float64}(undef,n_vars,n_states)
Z[1,1] = 1.0
Z[1,2] = 0.0
end
# generate data (solve using Euler-Maruyama)
n_quarters = 200.
global s_t = zeros(Float64, n_states) #initial values
# #use Differential Equations
# # Set up SDE
# f(u,p,t) = T*u
# g(u,p,t) = R
# dt = 1.0/90.0 # daily frequency when one period is one quarter
# tspan = (0.0,n_quarters) # 50 years of data
# W = WienerProcess(0.0, 0., 0.)
# prob = SDEProblem(f, g, s_t, tspan, W)
# sol = solve(prob, EM(), dt = dt)
# s_1T = vcat(sol.u...)
# data_d = s_1T*Z'
# gr()
# default(show = true)
# p = plot(sol.t, data_d)
# sleep(2)
dt = 1.0/90.0 # daily frequency when one period is one quarter (only defined in the commented-out block above, but used below)
data_d = Array{Float64}(undef,Int(round(n_quarters/dt)),n_vars)
time_d = Array{Float64}(undef,Int(round(n_quarters/dt)))
n_step = 10.0
for i_t = 1:Int(round(n_quarters/dt))
    for i = 1:Int(n_step)
        global s_t # rebinding a global inside a top-level loop needs the keyword
        # Euler-Maruyama step with time increment dt/n_step
        s_t = s_t + T*s_t*dt/n_step + sqrt(dt/n_step)*R*randn(n_shocks)
    end
    data_d[i_t,:] = Z*s_t
    time_d[i_t] = i_t*dt
end
gr()
default(show = true)
p = plot(time_d, data_d, color = :red)
# created 50 years worth of daily data -> transform data sampled at quarterly frequency for discrete observations
Delta_t = 1.0 # alternatives tried: 1.0/90.0, 1.0
del_t = dt./Delta_t
@show n_y = Int(round.(length(data_d)*dt/Delta_t;digits = 0))
data = Array{Float64}(undef,n_y,n_vars)
for i = 1:n_y
data[i,:] .= data_d[1+(i-1)*Int(floor(Delta_t/dt)),:]
end
E = zeros(n_vars, n_vars) # meas error
Q = Matrix{Float64}(I,n_shocks,n_shocks) # variance of shocks
mean_0 = zeros(n_states,1) # intial state mean
var_0 = 0.1*Matrix{Float64}(I,n_states,n_states) # initial state variance
for sig = 0.1:0.1:2.0
prob_out = ct_kalman_simple(T, Z, R*(sig*Q)*R', E, mean_0, var_0, data, [Delta_t])
@show [sig prob_out]
end
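# Possible extension (not part of the original script): collect the values instead of
# only printing them, so the likelihood curve over sig can be plotted; it should peak
# near the true shock scale sig = 1. Assumes ct_kalman_simple returns the scalar
# (log-)likelihood shown above.
# sigs = collect(0.1:0.1:2.0)
# logliks = [ct_kalman_simple(T, Z, R*(s*Q)*R', E, mean_0, var_0, data, [Delta_t]) for s in sigs]
# plot(sigs, logliks, color = :blue)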
# not sure the Delta_t and dt are correct
#E = zeros(n_vars, n_vars) * Delta_t # Shock is Browian motion -> variance is Δ_t b/c this is quarterly observation data
#Q = Matrix{Float64}(I,n_shocks,n_shocks) * dt # Shock is Brownian motion -> variance is dt b/c this is state data
#out = ct_kalman_filter(data, T, R, C, Q, Z, D, E, Delta_t)
#@show true_lik = sum(out[1])
# #, label = "High vol",xlims=(glimpdf[1],glimpdf[2]))
#=
SeHyoun's function
% Test Kalman Filer <kalman_ct>
n_simul = 1000;
n_step = 50;
make_plots = false;
F = [-1, 0; 0, -1];
F_sim = F;
Q = 0.02*speye(2);
% Q = [0.1, 0.02; 0.02, 0.1];
R = 0.01*speye(2);
% R = [0.1, 0.02; 0.02, 0.1];
H = speye(2);
mean_init = [2; -2];
var_init = 0.01*speye(2);
mean_now = mean_init;
data_x = mean_now + sqrt(0.1)*randn(2,1);
var_now = Var_init;
dt = 0.25;
F_val = 0.1:0.1:1.9;
data_y_iter = zeros(2, n_simul);
for iter_time = 1:n_simul
for i = 1:n_step
data_x = data_x + F*data_x*dt/n_step + sqrt(dt/n_step)*sqrtm(Q)*randn(2, 1);
end
data_y = H*data_x + sqrtm(R)*randn(2, 1);
data_y_iter(:, iter_time) = data_y;
end
figure(101);
% plot(0:dt:dt*(n_simul-1), data_y_iter);
plot(data_y_iter(1,:), data_y_iter(2, :), '.-');
time_step = dt*ones(n_simul, 1);
for iter_outer = 1:length(F_val)
F_sim = F_val(iter_outer)*F;
[mean_now, var_now, log_like] = kalman_ct(F_sim, H, Q, R, mean_init, var_init, data_y_iter, time_step);
log_like_iter(iter_outer) = log_like;
figure(4);
clf;
plot(F_val(1:iter_outer), log_like_iter(1:iter_outer));
xlim([0.1, 1.9]);
drawnow
end
=#
##############################################
### Interactions with other point types ###
##############################################
# convert to a geodesy type - overload this with user methods
"""
Convert to geodesy representations of a custom type
If the input is a Type, a Geodesy type should be returned
If the input is an object, a Geodesy object should be returned
"""
geodify{T}(::Type{T}) = error("Function to retrieve the appropriate geodesy type for a type $(T) is not defined")
geodify(X) = error("No conversion is defined to convert from type $(typeof(X)) to a geodesy type")
# and set a default transformation to the desired output type
geotransform_ext{T}(::Type{T}, X) = T(geotransform(geodify(T), geodify(X)), X)
# The below was for when I was allowing extra arguments to pass to the output type constructor
# geotransform_ext{T}(::Type{T}, X...) = T(geotransform(geodify(T), geodify(X[1])), X...)
# define geodify for all known types
for geo_type in Geodesy.GeodesyTypes
q = quote
geodify(::Type{$(geo_type)}) = $(geo_type)
geodify{T}(::Type{$(geo_type){T}}) = $(geo_type){T}
geodify(X::$(geo_type)) = X
end
eval(q) # TODO: eval? figure out the right way to do this
end
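# Hypothetical user code (not part of this file): a custom point type is hooked into
# the machinery by overloading `geodify` for both the type and its instances, e.g.
#
#   immutable MyPoint
#       x::Float64; y::Float64; z::Float64
#   end
#   geodify(::Type{MyPoint}) = ECEF             # the Geodesy type to convert through (assumed)
#   geodify(p::MyPoint)      = ECEF(p.x, p.y, p.z)
#
# after which geotransform_ext(SomeTargetType, p) routes through the geodesy types.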
module GNSSBenchmarks
using
CUDA,
CSV,
StructArrays,
BenchmarkTools,
LoopVectorization,
Tracking,
TrackingLoopFilters,
GNSSSignals,
DataFrames,
PGFPlotsX,
LinearAlgebra,
StaticArrays
using Unitful: upreferred, Hz, dBHz, ms, kHz, MHz
import
LinearAlgebra.dot,
Base.length,
Tracking.TrackingState,
Tracking.NumAnts,
Tracking.MomentsCN0Estimator,
Tracking.AbstractCN0Estimator,
Tracking.AbstractCorrelator,
Tracking.EarlyPromptLateCorrelator,
Tracking.SecondaryCodeOrBitDetector,
Tracking.GainControlledSignal,
Tracking.found,
Tracking.TrackingResults,
Tracking.BitBuffer,
Tracking.get_default_post_corr_filter,
Tracking.get_num_samples_left_to_integrate,
Tracking.get_num_samples,
Tracking.get_num_ants,
Tracking.get_current_carrier_frequency,
Tracking.get_current_code_frequency,
TrackingLoopFilters.AbstractLoopFilter,
GNSSSignals.AbstractGNSSSystem
export
main,
plotall,
benchmark_downconvert,
benchmark_carrier_replica,
benchmark_code_replica,
benchmark_correlate,
benchmark_tracking_loop,
plot_carrier_replica,
gpu_correlate,
plot_correlate_all,
sgpu_correlate,
plot_code_replica_all
include("main.jl")
include("plot.jl")
include("gpu_downconvert.jl")
include("gpu_carrier_replica.jl")
include("gpu_code_replica.jl")
include("gpu_correlate.jl")
include("gpu_tracking_state.jl")
include("gpu_tracking_results.jl")
include("gpu_tracking_loop.jl")
include("benchmark_carrier_replica.jl")
include("benchmark_code_replica.jl")
include("benchmark_downconvert.jl")
include("benchmark_correlate.jl")
include("benchmark_gpu_tracking_loop.jl")
const MAX_NUM_SAMPLES = 50000
const SAMPLES = StepRange(2500,2500,MAX_NUM_SAMPLES)
const ANTENNA = [1, 4, 16]
end
abstract type AbstractARGNode end
"""
mutable struct ARGNode
Node of an Ancestral Reassortment Graph representing two genes / segments.
## Fields:
```
anc :: Vector{Union{Nothing, AbstractARGNode}} = [nothing, nothing] # Always of length 2
color :: Vector{Bool} = [false, false]
tau :: Vector{Union{Missing, Float64}} = [missing, missing]
children :: Vector{ARGNode} = ARGNode[]
label :: String = make_random_label()
hybrid :: Bool = false # Hybrids can have the same ancestor for some reassortments
```
"""
@with_kw_noshow mutable struct ARGNode <: AbstractARGNode
anc :: Vector{Union{Nothing, AbstractARGNode}} = [nothing, nothing] # Always of length 2
color :: Vector{Bool} = [false, false]
tau :: Vector{Union{Missing, Float64}} = [missing, missing]
children :: Vector{ARGNode} = ARGNode[]
label :: String = make_random_label()
hybrid :: Bool = false # Hybrids can have the same ancestor for some reassortments
end
struct RootNode <: AbstractARGNode
# children :: Vector{ARGNode}
end
# Equality
isequal(a::ARGNode, b::ARGNode) = (a.label==b.label)
isequal(a::ARGNode, b::RootNode) = false
isequal(a::RootNode, b::ARGNode) = false
isequal(a::ARGNode, b::Nothing) = false
isequal(a::Nothing, b::ARGNode) = false
Base.:(==)(x::ARGNode, y::ARGNode) = isequal(x,y)
Base.hash(x::ARGNode, h::UInt) = hash(x.label, h)
# Access
ancestors(n::ARGNode) = n.anc
function ancestor(n::ARGNode, c::Int)
@assert hascolor(n, c)
return n.anc[c]
end
children(n::ARGNode) = n.children
children(::RootNode) = ARGNode[] # RootNode has no children field (commented out above), so return an empty list
"""
children(n::ARGNode, clr)
Children of `n` for color `clr`. Child `c` must meet the conditions:
- `hascolor(c, clr)`
- `ancestor(c, clr) == n`
"""
function children(n::ARGNode, clr)
Iterators.filter(children(n)) do c
hascolor(c, clr) && ancestor(c, clr) == n
end
end
color(n::ARGNode) = n.color
color(::RootNode) = nothing
label(n::ARGNode) = n.label
label(::RootNode) = "_RootNode_"
label(::Nothing) = nothing
branch_length(n::ARGNode) = n.tau
branch_length(n::RootNode) = 0.
tau(n) = branch_length(n)
function branch_length(n::ARGNode, clr)
@assert hascolor(n, clr)
return n.tau[clr]
end
function branch_length(n::RootNode, clr)
@assert hascolor(n, clr)
return 0.
end
## isroot / isleaf
"""
isroot(n)
Is `n` the root for one color?
"""
isroot(n::ARGNode) = in(RootNode(), n.anc)
"""
isroot(n, c)
Is `n` the root for color `c`?
"""
isroot(n::ARGNode, c::Int) = (n.anc[c] == RootNode())
function is_global_root(n::ARGNode)
if isroot(n)
for c in 1:length(n.anc)
if !isroot(n, c)
return false
end
end
return true
end
return false
end
function is_partial_root(n::ARGNode)
if !isroot(n)
return false
else
return !is_global_root(n)
end
end
isleaf(n::ARGNode) = isempty(children(n))
ishybrid(n::ARGNode) = n.hybrid
ishybrid(::RootNode) = false
degree(n) = sum(color(n))
isshared(n) = (degree(n) == length(color(n)))
# Tests
hasancestor(n::ARGNode, c::Int) = !isnothing(ancestor(n,c)) && !isroot(n,c)
function hasancestor(n::ARGNode, a::ARGNode)
for x in ancestors(n)
if x == a
return true
end
end
return false
end
function haschild(a::ARGNode, n::ARGNode)
for c in children(a)
if c == n
return true
end
end
return false
end
hascolor(n::ARGNode, c::Int) = n.color[c]
hascolor(::RootNode, c::Int) = true
hascolor(::Nothing, c::Int) = true
function share_color(n1::ARGNode, n2::ARGNode)
for (c1,c2) in zip(color(n1), color(n2))
if c1 && c2
return true
end
end
return false
end
share_color(n::ARGNode, r::RootNode) = true
share_color(r::RootNode, n::ARGNode) = true
# Set / add children / ancestors
function add_child!(n::ARGNode, c::ARGNode)
@assert !haschild(n, c) && share_color(n, c)
push!(children(n), c)
end
function set_ancestor!(n, a, c::Int)
@assert hascolor(n, c) && hascolor(a, c) "Nodes do not share color $c: \n $(label(n)): $(color(n)) \n $(label(a)): $(color(a))"
n.anc[c] = a
end
function set_ancestor!(n, a)
	for (i,c) in enumerate(color(n))
		if c
			set_ancestor!(n, a, i)
		end
	end
end
function add_color!(n, c::Int)
@assert !hascolor(n, c) "Node $(n.label) already has color $c"
n.color[c] = true
end
function set_branch_length!(n, τ::Real, c, clrs...)
@assert hascolor(n, c)
@assert hasancestor(n, c)
n.tau[c] = τ + eps()
for clr in clrs
set_branch_length!(n, τ, clr)
end
end
function set_branch_length!(n, ::Missing, c, clrs...)
@assert hascolor(n, c)
n.tau[c] = missing
for clr in clrs
set_branch_length!(n, missing, clr)
end
end
set_label!(n::ARGNode, label) = (n.label = label)
# Utils.
function othercolor(c)
@assert (c==1 || c==2) "Invalid color $c"
return (c==1) ? 2 : 1
end
"""
edgecolor(a, n)
Color of the edge going from `a` to `n`
"""
function edgecolor(a, n)
clr = [false, false]
for (i,c) in enumerate(color(n))
if c && a == ancestor(n, i)
clr[i] = true
end
end
return clr
end
mutable struct ARG
roots :: Dict{Int, ARGNode}
nodes :: Dict{String, ARGNode}
hybrids :: Dict{String, ARGNode}
leaves :: Dict{String, ARGNode}
end
roots(arg::ARG) = arg.roots
nodes(arg::ARG) = values(arg.nodes)
hybrids(arg::ARG) = values(arg.hybrids)
leaves(arg::ARG) = values(arg.leaves)
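# Minimal usage sketch (not in the original file; relies on the keyword constructor
# generated by @with_kw_noshow above):
#
#   root = ARGNode(anc = [RootNode(), RootNode()], color = [true, true], label = "root")
#   leaf = ARGNode(anc = [root, nothing], color = [true, false], label = "leaf")
#   add_child!(root, leaf)             # allowed: the two nodes share color 1
#   set_branch_length!(leaf, 0.5, 1)   # branch length towards its ancestor for color 1
#   isleaf(leaf), isroot(root)         # -> (true, true)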
## Exercise 5-7
## https://benlauwens.github.io/ThinkJulia.jl/latest/images/fig52.svg
## Figure 8. A Koch curve
## The Koch curve is a fractal that looks something like A Koch curve. To draw a Koch curve with length x, all you have to do is
## 1. Draw a Koch curve with length x/3.
## 2. Turn left 60 degrees.
## 3. Draw a Koch curve with length x/3.
## 4. Turn right 120 degrees.
## 5. Draw a Koch curve with length x/3.
## 6. Turn left 60 degrees.
## 7. Draw a Koch curve with length x/3.
## The exception is if x is less than 3: in that case, you can just draw a straight line with length x.
using ThinkJulia
## 1. Write a function called koch that takes a turtle and a length as parameters, and that uses the turtle to draw a Koch curve with the given length.
println("Ans 1: ")
function koch(turtle::Turtle, length)
if length < 3
forward(turtle, length)
return
end
koch(turtle, length/3)
turn(turtle, 60)
koch(turtle, length/3)
turn(turtle, -120)
koch(turtle, length/3)
turn(turtle, 60)
koch(turtle, length/3)
end
turtle = Turtle()
# @svg begin
# koch(turtle, 100)
# end
## 2. Write a function called snowflake that draws three Koch curves to make the outline of a snowflake.
println("Ans 2: ")
function snowflake(turtle, length)
koch(turtle, length)
turn(turtle, -60)
koch(turtle, length)
turn(turtle, -60)
koch(turtle, length)
turn(turtle, -60)
koch(turtle, length)
turn(turtle, -60)
koch(turtle, length)
turn(turtle, -60)
koch(turtle, length)
turn(turtle, -60)
end
# @svg begin
# snowflake(turtle, 100)
# end
## 3. The Koch curve can be generalized in several ways. See https://en.wikipedia.org/wiki/Koch_snowflake for examples and implement your favorite.
println("Ans 3: ")
function koch_square(turtle::Turtle, length)
if length < 3
forward(turtle, length)
return
end
koch_square(turtle, length/3)
turn(turtle, 90)
koch_square(turtle, length/3)
turn(turtle, -90)
koch_square(turtle, length/3)
turn(turtle, -90)
koch_square(turtle, length/3)
turn(turtle, 90)
    koch_square(turtle, length/3) # each segment ends heading the same way it started, so no trailing turn
end
@svg begin
koch_square(turtle, 60)
end
println("End.")
import Base: convert, print, show, push!, length, getindex, sort!, sort, +,-,*,.*,==,/, setindex!,replace,start,next,done,zero
abstract Component
function ==(c1::Component, c2::Component)
if isa(c1,typeof(c2))
for n in fieldnames(c1)
if getfield(c1,n)!=getfield(c2,n)
return false
end
end
return true
end
return false
end
abstract SingleArg <: Component
==(sa1::SingleArg,sa2::SingleArg)=isa(sa1,typeof(sa2))&&sa1.x==sa2.x
abstract NonAbelian <: Component
abstract Operator <: Component
function getargs(c::Component)
n=fieldnames(c)
ret=Any[]
for nam in n
push!(ret,getfield(c,nam))
end
return ret
end
function getarg(c::Component,n::Integer=1)
getfield(c,fieldnames(c)[n])
end
function setarg!(c::Component,newarg,argn::Integer=1)
setfield!(c,fieldnames(c)[argn],newarg)
return c
end
setarg(c::Component,newarg,argn::Integer=1)=setarg!(deepcopy(c),newarg,argn)
type Components <: Component
components
coef
end #maybe this type should be deprecated and sumsym rewritten
function ==(cs1::Components,cs2::Components)
nc=length(cs1.components)
if nc != length(cs2.components)
return false
else
for c in 1:nc
if length(indsin(cs1.components,cs1.components[c]))!=length(indsin(cs2.components,cs1.components[c]))
return false
end
end
end
return true
end
type Expression
terms::Array{Array{Union{Number,Symbol,Component,Expression},1},1}
end
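# An Expression is stored as a sum of products: `terms` is a vector of terms, and each
# term is a vector of factors (numbers, symbols, components or nested expressions)
# that are multiplied together; see the print method below for how it is rendered.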
length(ex::Expression)=length(ex.terms)
getindex(ex::Expression,i::Integer)=getindex(ex.terms,i)
function getindex(ex::Expression,t::Array)
p=ex[t[1]]
if length(t)==1
return p
end
for i in 2:length(t)-1
p=p[t[i]]
end
return p[t[end]]
end
function setindex!(ex::Expression,a,t::Array)
p=ex[t[1]]
for i in 2:length(t)-1
p=p[t[i]]
end
p[t[end]]=a
end
function getindex(c::Component,t::Array)
p=getarg(c,t[1])
for i in 2:length(t)-1
p=p[t[i]]
end
return p[t[end]]
end
function setindex!(c::Component,a,t::Array)
p=getarg(c,t[1])
for i in 2:length(t)-1
p=p[t[i]]
end
p[t[end]]=a
end
function print(io::IO,c::Union{Number,Component})
if isa(c,Number)&&(isa(c,Complex)||c<0)
print(io,'(')
show(io,c)
print(io,')')
elseif isa(c,SingleArg)
print(io,typeof(c),'(')
print(io,getarg(c))
print(io,')')
elseif isa(c,Component)
print(io,typeof(c),'(')
ca=getargs(c)
for a in 1:length(ca)-1
print(io,ca[a])
print(io,',')
end
print(io,ca[end])
print(io,')')
else
show(io,c)
end
end
function preprint(io::IO,fac)
if isa(fac,Expression)
print(io,'(')
print(io,fac)
print(io,')')
else
print(io,fac)
end
end
function print(io::IO,ex::Expression)
if isempty(ex.terms)
print(io,ex.terms)
else
for term in 1:length(ex.terms)-1
for fac in 1:length(ex.terms[term])-1
preprint(io,ex.terms[term][fac])
print(io,' ')
end
preprint(io,ex.terms[term][end])
print(io," + ")
end
for fac in 1:length(ex.terms[end])-1
preprint(io,ex.terms[end][fac])
print(io,' ')
end
preprint(io,ex.terms[end][end])
end
end
N=Union{Number,Symbol}
X=Union{Number,Symbol,Component}
Ex=Union{Symbol,Component,Expression}
EX=Union{Number,Symbol,Component,Expression}
typealias Factor EX
show(io::IO,x::Type{Factor})=print(io, "Factor")
typealias Term Array{Factor,1}
convert(::Type{Array{Array{Factor,1},1}},a::Array{Any,1})=Term[a]
zero(f::Factor)=0
macro delegate(source, targets) # by JMW
typename = esc(source.args[1])
fieldname = esc(Expr(:quote, source.args[2].args[1]))
funcnames = targets.args
n = length(funcnames)
fdefs = Array(Any, n)
for i in 1:n
funcname = esc(funcnames[i])
fdefs[i] = quote
($funcname)(a::($typename), args...) =
($funcname)(a.($fieldname), args...)
end
end
return Expr(:block, fdefs...)
end
complexity(n::N)=1
function complexity(c::Component)
tot=0
for n in fieldnames(c)
tot+=complexity(getfield(c,n))
end
return tot
end
function complexity(term::Term)
tot=0
for factor in term
tot+=complexity(factor)
end
return tot
end
function complexity(ex::Expression)
tot=0
for term in ex
tot+=complexity(term)
end
return tot
end
function complexity(a::Array)
tot=0
for item in a
tot+=complexity(item)
end
return tot
end
expression(a::Term)=Expression(Term[a])
expression(a::Array{Term})=Expression(a)
function expression(cs::Array{Components})
if isempty(cs)
return 0
end
ex=Expression(Term[])
for cc in cs
if cc.coef!=0
nterm=Factor[]
if cc.coef!=1||isempty(cc.components)
push!(nterm,cc.coef)
end
for c in cc.components
push!(nterm,c)
end
push!(ex.terms,nterm)
end
end
if length(ex)==0
return 0
else
return ex
end
end
expression(x::X)=expression(Factor[x])
==(ex1::Expression,ex2::Expression)=ex1.terms==ex2.terms
push!(ex::Expression,a)=push!(ex.terms,a)
push!(x::X,a)=expression(Factor[x,a])
+(ex1::Expression,ex2::Expression)=begin;ex=deepcopy(ex1);push!(ex.terms,[ex2]);ex;end
+(ex::Expression,a::X)=begin;ex=deepcopy(ex);push!(ex.terms,[a]);ex;end
-(ex::Expression,a::X)=begin;ex=deepcopy(ex);push!(ex.terms,[-1,a]);ex;end
-(a::X,ex::Expression)=a+(-1*ex)
-(ex1::Expression,ex2::Expression)=begin;ex=deepcopy(ex1);push!(ex,Factor[-1,ex2]);ex;end
+(a::X,ex::Expression)=begin;ex=deepcopy(ex);insert!(ex.terms,1,Factor[a]);ex;end
*(ex1::Expression,ex2::Expression)=expression(Factor[deepcopy(ex1),deepcopy(ex2)])
#.*(a::Array,ex::Ex)=ex.*a
function .*(ex::Ex,a::Array)
na=EX[]
for i in 1:length(a)
push!(na,ex*a[i])
end
return na
end
.*(n::Number,ex::EX)=*(n,ex)
/(ex::Ex,n::Number)=*(1/n,ex)
function *(a::X,ex::Expression)
ex=deepcopy(ex)
for ti in 1:length(ex)
insert!(ex[ti],1,a)
end
return ex
end
*(a::Number,c::Component)=Expression([a,c])
*(c1::Union{Component,Symbol},c2::Union{Component,Symbol})=Expression(Term[Factor[c1,c2]])
function *(ex::Expression,x::EX)
ap=terms(deepcopy(ex))
for t in ap
push!(t,x)
end
return expression(ap)
end
-(c1::Component,c2::Component)=+(c1,-1*c2)
+(c1::Component,c2::Component)=Expression(Term[Factor[c1],Factor[c2]])
+(c::Component,ex::Expression)=Expression(Term[Factor[c],Factor[ex]])
+(ex::Expression,c::Component)=Expression(Term[Factor[ex],Factor[c]])
+(x1::X,x2::X)=Expression(Term[[x1],[x2]])
-(x1::X,x2::X)=Expression(Term[[x1],[-1,x2]])
-(x::X)=Expression([-1,x])
-(ex::Expression)=-1*ex
*(x1::X,x2::X)=Expression([x1,x2])
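# Illustrative sketch (not in the original source): with the overloads above,
#   ex = :a*:b + 2*:c
# builds a (possibly nested) Expression; componify/simplify further below flatten it
# into a plain sum of products.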
abstract Operation <: Component
function indin(array,item)
ind=0
for it in array
ind+=1
if it==item
return ind
end
end
return 0
end
function indin(array,typ::Type)
for it in 1:length(array)
if isa(array[it],typ)
return it
end
end
return 0
end
function expandindices(inds::Tuple,ninds::Array{Array{Integer}}=Array{Integer}[],nind::Array{Integer}=Integer[])
for l in 1:length(inds)
if isa(inds[l],Integer)
push!(nind,inds[l])
elseif isa(inds[l],Array)
for a in inds[l]
if isa(a,Tuple)
tia=expandindices(a)
for ti in deepcopy(tia)
nnind=deepcopy(nind)
pushall!(nnind,ti)
push!(ninds,nnind)
end
else
nnind=deepcopy(nind)
push!(nnind,a)
push!(ninds,nnind)
end
end
else
println(inds)
end
end
return ninds
end
function expandindices(a::Array)
na=Array{Integer}[]
for i in 1:length(a)
pushall!(na,expandindices(a[i]))
end
return na
end
function indsin(array::Array,item)
ind=Int64[]
for it in 1:length(array)
if array[it]==item
push!(ind,it)
end
end
return ind
end
function indsin(ex::Expression,item)
#if simplify(ex)<ex
# warn("Please call simplify on $ex")
#end
inds=Tuple[]
for ti in 1:length(ex)
ind=indsin(ex[ti],item)
if !isempty(ind)
push!(inds,(ti,ind))
end
end
return inds
end
function indsin(c::Component,item)
inds=Any[]
args=getargs(c)
for ai in 1:length(args)
if args[ai]==item||isa(args[ai],item)
push!(inds,ai)
continue
elseif isa(args[ai],N)
continue
end
ind=indsin(args[ai],item)
if !isempty(ind)
push!(inds,(ai,ind))
end
end
return inds
end
function indsin(array::Array,typ::Type)
ind=Int64[]
for it in 1:length(array)
if isa(array[it],typ)
push!(ind,it)
end
end
return ind
end
indsin(n::Number,a)=[]
terms(ex::Expression)=ex.terms
terms(x::X)=x #Term[Factor[x]] #hmm
dcterms(ex::Expression)=terms(deepcopy(ex))
dcterms(x::X)=x
function has(a::Array,t::Type)
for it in a
if isa(it,t)
return true
end
end
return false
end
function has(a::Array,t::EX)
for it in a
if it==t
return true
end
end
return false
end
function has(term1::Term,term2::Term)
if length(term1)<length(term2)
return false
end
for p1 in permutations(term1)
for p2 in permutations(term2)
for l in 1:length(term2)
if p1[1:l]==p2[1:l]
return true
end
end
end
end
return false
end
function has(ex::Expression,x::EX)
for term in ex
for c in term
if c==x || (isa(c,Expression)&&has(c,x)) || (isa(c,Component)&&has(c,x))
return true
end
end
end
return false
end
function has(ex::Expression,t::Type)
for term in ex
for c in term
if isa(c,t) || (isa(c,Expression)&&has(c,t)) || (isa(c,Component)&&has(c,t))
return true
end
end
end
return false
end
function has(c::Component,x::Symbol)
for a in getargs(c)
if has(a,x)
return true
end
end
return false
end
function has(c::Component,x::Type)
if isa(c,x)
return true
end
for a in getargs(c)
if has(a,x)
return true
end
end
return false
end
has(n::N,x::Symbol)=n==x
has(::N, ::Type)=false
function maketype(c::Component,fun)
args=getargs(c)
for argi in 1:length(args)
args[argi]=fun(args[argi])
end
return typeof(c)(args...)
end
function unnest(ex::Expression)
nt=Term[]
for term in ex.terms
nf=Factor[]
for fac in term
if isa(term,Array)
for nested in term
push!(nf,nested)
end
else
push!(nf,fac)
end
end
push!(nt,nf)
end
return Expression(nt)
end
function componify(ex::Expression,raw=false)
ap=dcterms(ex)
lap=length(ap)
for term in 1:lap
exs=Expression[]
xs=X[]
for fac in ap[term]
if isa(fac,Array)
#warn("How did the array $fac end up in $ex?") #through replace!
push!(exs,componify(Expression(fac)))
elseif isa(fac,Expression)
push!(exs,componify(fac))
elseif isa(fac,Component)
fac=maketype(fac,componify)
push!(xs,fac)
else
push!(xs,fac)
end
end
if isempty(exs)
continue
else
tap=exs[1].terms
for x in xs
for tterm in tap
unshift!(tterm,x)
end
end
exs[1]=expression(tap)
while length(exs)>1
ap1=terms(exs[1])
ap2=terms(exs[2])
lap1,lap2=length(ap1),length(ap2)
multed=Term[]
for l in 1:lap1*lap2
push!(multed,Factor[])
end
for l1 in 0:lap1-1
for l2 in 1:lap2
for fac in ap1[l1+1]
push!(multed[l2+l1*lap2],fac)
end
for fac in ap2[l2]
push!(multed[l2+l1*lap2],fac)
end
end
end
exs[2]=expression(multed)
deleteat!(exs,1)
end
ap[term]=terms(exs[1])[1]
for tterm in 2:length(exs[1])
push!(ap,exs[1][tterm])
end
end
end
if raw
return ap #this was needed for previous componify, maybe remove?
else
return expression(ap)
end
end
function componify(a::Array)
if length(a)==1
return componify(extract(a))
end
a=deepcopy(a)
for i in 1:length(a)
a[i]=componify(a[i])
end
return a
end
componify(a::Array{Array})=componify(expression(a))
componify(c::Component)=maketype(c,componify)
componify(x::N)=x
function extract(ex::Expression)
if length(ex.terms)==1&&length(ex.terms[1])==1
return ex[1][1]
end
return ex
end
function extract(a::Array)
if length(a)==1
return a[1]
end
return a
end
import Base.isless
isless(ex::Expression,x::X)=false
isless(x::X,ex::Expression)=true
isless(c::Component,s::N)=false
isless(s::N,c::Component)=true
isless(s::Symbol,n::Number)=false
isless(n::Number,s::Symbol)=true
isless(s::Complex,n::Complex)=real(s)<real(n)?true:imag(s)<imag(n)?true:false
isless(n::Real,s::Complex)=true
isless(s::Complex,n::Real)=false
isless(na::NonAbelian,na2::NonAbelian)=false
isless(a::Symbol,op::Operator)=false
isless(op::Operator,a::Symbol)=false
function isless(c1::Component,c2::Component)
if isa(c1,Operator)||isa(c2,Operator)
return false
end
xi1=complexity(c1)
xi2=complexity(c2)
xi1==xi2?isless(string(c1),string(c2)):xi1<xi2
end
function isless(t1::Term,t2::Term)
xi1=complexity(t1)
xi2=complexity(t2)
xi1==xi2?isless(string(t1),string(t2)):xi1<xi2
end
function isless(ex1::Expression,ex2::Expression)
xi1=complexity(ex1)
xi2=complexity(ex2)
xi1==xi2?isless(string(ex1),string(ex2)):xi1<xi2
end
sort(n::N)=n
sort(c::Component)=maketype(c,sort)
function sort!(ex::Expression)
ap=terms(ex)
for term in ap
sort!(term)
end
return expression(sort!(ap))
end
sort(ex::Expression)=sort!(deepcopy(ex))
include("tensors.jl")
function simplify(ex::Expression)
tex=0
nit=0
while tex!=ex
tex=ex
ex=sumsym(sumnum(componify(ex)))
ap=terms(ex)
if isa(ap,X)
return ap
end
for term in 1:length(ap)
ap[term]=divify!(ap[term])
ap[term]=divbine!(ap[term])
ap[term]=divbinedify!(ap[term])
for fac in 1:length(ap[term])
ap[term][fac]=simplify(ap[term][fac])
end
unsqrt!(ap[term])
#sort!(ap[term])
end
ex=extract(expression(ap)) #better to check if res::N before calling expression instead of extracting?
nit+=1
if has(ex,Ten)
ex=simplify(ex,Ten)
end
if nit>90
warn("Stuck in simplify! Iteration $nit: $ex")
break
end
end
return ex#sort!(ex)
end
function simplify(c::Component)
args=deepcopy(getargs(c))
for arg in 1:length(args)
args[arg]=simplify(args[arg])
end
return typeof(c)(args...)
end
simplify(x::N)=x
simplify(x::N,a)=x
simplify!(x::N)=x
function simplify!(a::Array)
if length(a)==1
return simplify(a[1])
else
for i in 1:length(a)
a[i]=simplify(a[i])
end
end
return a
end
simplify(a::Array)=simplify!(deepcopy(a))
function simplify(d::Dict)
nd=Dict()
for k in keys(d)
nk=simplify(k)
nval=simplify(d[k])
nd[nk]=nval
end
return nd
end
function sumnum(ex::Expression)
terms=dcterms(ex)
nterms=Term[]
numsum=0
for term in terms
prod=1
allnum=true
nterm=Factor[]
for t in term
if typeof(t)<:Number
prod*=t
else
if allnum
allnum=false
end
push!(nterm,t)
end
end
if prod==0
continue
elseif allnum
numsum+=prod
continue
else
if prod!=1
unshift!(nterm,prod)
end
push!(nterms,nterm)
end
end
if numsum!=0
push!(nterms,[numsum])
end
if length(nterms)==1&&length(nterms[1])==1
return nterms[1][1]
elseif isempty(nterms)
return 0
else
return expression(nterms)
end
end
sumnum(c::Component)=typeof(c)(sumnum(getarg(c)))
sumnum(x::N)=x
function sumsym(ex::Expression)
ap=terms(deepcopy(ex))
nap=length(ap)
cs=Array(Components,0)
for add in 1:nap
tcs=Array(Ex,0)
coef=1
for term in ap[add]
if isa(term,Ex)
push!(tcs,term)
elseif isa(term,Number)
coef*=term
else
error("Don't know how to handle $term")
end
end
com=Components(tcs,coef)
ind=indin(cs,com)
if ind==0
push!(cs,com)
else
cs[ind].coef+=com.coef
end
end
ret=expression(cs)
if isa(ret,Expression)&&length(ret.terms)==1&&length(ret.terms[1])==1
return ret[1][1]
else
return ret
end
end
sumsym(c::Component)=maketype(c,sumsym)
sumsym(x::N)=x
function sumsym!(a::Array)
for i in 1:length(a)
a[i]=sumsym(a[i])
end
return a
end
sumsym(a::Array)=sumsym!(deepcopy(a))
function findsyms(term::Array)
syms=Dict()
for fac in 1:length(term)
if isa(term[fac],Symbol)
if haskey(syms,term[fac])
push!(syms[term[fac]],fac)
else
syms[term[fac]]=Any[fac]
end
end
end
return syms
end
function findsyms(ex::Expression)
syms=Set{Symbol}()
for term in ex
for fac in term
if isa(fac,Symbol)
push!(syms,fac)
elseif isa(fac,Expression)||isa(fac,Component)
syms=union(syms,findsyms(fac))
end
end
end
return syms
end
findsyms(c::Component)=findsyms(getarg(c))
findsyms(s::Symbol)=Set([s])
findsyms(n::Number)=Set()
function findsyms(ex::Expression,symdic::Dict)
syminds=Dict()
for k in keys(symdic)
inds=Tuple[]
for ti in 1:length(ex)
for ci in 1:length(ex[ti])
if ex[ti][ci]==k
push!(inds,(ti,ci))
end
end
end
syminds[k]=inds
end
return syminds
end
findsyms(c::Component,symdic::Dict)=findsyms(getarg(c),symdic)
function findcoms(term::Array)
coms=Dict()
for fac in 1:length(term)
if isa(term[fac],Component)
if haskey(coms,term[fac])
push!(coms[term[fac]],fac)
else
coms[term[fac]]=Any[fac]
end
end
end
return coms
end
function hasex(symdic::Dict)
for v in values(symdic)
if isa(v,Ex)
return true
end
end
return false
end
function delexs(symdic::Dict)
sd=deepcopy(symdic)
for k in keys(symdic)
if isa(symdic[k],Ex)
delete!(sd,k)
end
end
return sd
end
function replace!(ex::Expression,symdic::Dict)
for ti in 1:length(ex)
for c in 1:length(ex[ti])
if isa(ex[ti][c],Ex)
rep=replace(ex[ti][c],symdic)
deleteat!(ex[ti],c)
if isa(rep,Array)
for r in rep
insert!(ex[ti],c,r)
end
else
insert!(ex[ti],c,rep)
end
end
end
end
syminds=findsyms(ex,symdic)
for tup in symdic
sym,val=tup
for i in syminds[sym]
if isa(val,Array)
deleteat!(ex[i[1]],i[2])
for r in val
insert!(ex[i[1]],i[2],r)
end
else
ex[i[1]][i[2]]=val
end
end
end
if isa(ex,Array)
ex=extract(expression(ex))
end
return componify(ex)
end
replace(ex::Expression,symdic::Dict)=replace!(deepcopy(ex),symdic)
function replace(c::Component,symdic::Dict)
args=convert(Array{Any},getargs(c))
for arg in 1:length(args)
args[arg]=replace(args[arg],symdic)
end
return typeof(c)(args...)
end
function replace!(a::Array,symdic::Dict)
for i in 1:length(a)
a[i]=replace(a[i],symdic)
end
return a
end
replace(a::Array,symdic::Dict)=replace!(deepcopy(a),symdic)
function replace(s::Symbol,symdic::Dict)
for tup in symdic
sym,val=tup
if s==sym
return simplify(val)
end
end
return s
end
replace(n::Number,symdic::Dict)=n
function evaluate(ex::Ex,symdic::Dict)
ex=simplify(replace(simplify(ex),symdic))
if isa(ex,Expression)&&hasex(symdic)
symdic=delexs(symdic)
ex=evaluate(ex,symdic)
end
return simplify(ex)
end
evaluate(x::Number,symdic::Dict)=x
function randeval(ex::Ex,seed=1)
srand(seed)
syms=findsyms(ex)
d=Dict()
for s in syms
d[s]=rand()
end
evaluate(ex,d)
end
start(ex::Expression)=(1,terms(ex))
function next(ex::Expression,state)
return (state[2][state[1]],(state[1]+1,state[2]))
end
done(ex::Expression,state)=state[1]>length(state[2])
function matches(ex::Expression,pat::Component)
if length(ex)==1
return matches(ex[1],pat)
end
termmds=Array{Dict}[]
for term in ex
push!(termmds,matches(extract(term),pat))
end
nterms=length(termmds)
ndics=Integer[]
for n in 1:nterms
push!(ndics,length(termmds[n]))
end
ninc=reduce(*,ndics)-1
validated=Dict[]
for inc in 0:ninc
indices=ones(Integer,nterms)
tl=0
for ti in 1:nterms
tinc=floor(inc/reduce(*,ndics[1:ti-1]))%ndics[ti]
tl+=ndics[ti]-1
indices[ti]+=tinc
end
if clash(termmds[1][indices[1]],termmds[2][indices[2]])
continue
end
comb=combine(termmds[1][indices[1]],termmds[2][indices[2]])
clashed=false
for tm in 3:length(termmds)
if clash(comb,termmds[tm][indices[tm]])
clashed=true
break
end
comb=combine(comb,termmds[tm][indices[tm]])
end
if clashed
continue
end
push!(validated,comb)
end
return validated
end
function pushall!(a::Array,b::Array)
for c in b
push!(a,c)
end
a
end
pushall!(a::Array,b)=push!(a,b)
|
proofpile-julia0005-42719 | {
"provenance": "014.jsonl.gz:242720"
} | using Base.Test
using QuantumOptics
N = 500
xmin = -62.5
xmax = 70.1
basis_position = PositionBasis(xmin, xmax, N)
basis_momentum = MomentumBasis(basis_position)
# Test Gaussian wave-packet in both bases
x0 = 5.1
p0 = -3.2
sigma = 1.
sigma_x = sigma/sqrt(2)
sigma_p = 1./(sigma*sqrt(2))
psix0 = gaussianstate(basis_position, x0, p0, sigma)
psip0 = gaussianstate(basis_momentum, x0, p0, sigma)
@test_approx_eq 1.0 norm(psix0)
@test_approx_eq 1.0 norm(psip0)
opx_p = momentumoperator(basis_position)
opx_x = positionoperator(basis_position)
opp_p = momentumoperator(basis_momentum)
opp_x = positionoperator(basis_momentum)
@test_approx_eq x0 expect(opx_x, psix0)
@test_approx_eq_eps p0 expect(opx_p, psix0) 0.1
@test_approx_eq p0 expect(opp_p, psip0)
@test_approx_eq_eps x0 expect(opp_x, psip0) 0.1
# Test position and momentum operators
@test_approx_eq_eps expect(-opx_p^2, psix0) expect(laplace_x(basis_position), psix0) 0.3
@test_approx_eq expect(opp_p^2, psip0) expect(laplace_x(basis_momentum), psip0)
@test_approx_eq expect(opx_x^2, psix0) expect(laplace_p(basis_position), psix0)
@test_approx_eq_eps expect(-opp_x^2, psip0) expect(laplace_p(basis_momentum), psip0) 0.3
# Test FFT transformation
function transformation(b1::MomentumBasis, b2::PositionBasis, psi::Ket)
Lp = (b1.pmax - b1.pmin)
dx = particle.spacing(b2)
if b1.N != b2.N || abs(2*pi/dx - Lp)/Lp > 1e-12
throw(IncompatibleBases())
end
N = b1.N
psi_shifted = exp(1im*b2.xmin*(particle.samplepoints(b1)-b1.pmin)).*psi.data
psi_fft = exp(1im*b1.pmin*particle.samplepoints(b2)).*ifft(psi_shifted)*sqrt(N)
return Ket(b2, psi_fft)
end
function transformation(b1::PositionBasis, b2::MomentumBasis, psi::Ket)
Lx = (b1.xmax - b1.xmin)
dp = particle.spacing(b2)
if b1.N != b2.N || abs(2*pi/dp - Lx)/Lx > 1e-12
throw(IncompatibleBases())
end
N = b1.N
psi_shifted = exp(-1im*b2.pmin*(particle.samplepoints(b1)-b1.xmin)).*psi.data
psi_fft = exp(-1im*b1.xmin*particle.samplepoints(b2)).*fft(psi_shifted)/sqrt(N)
return Ket(b2, psi_fft)
end
psix0_fft = transformation(basis_position, basis_momentum, psix0)
psip0_fft = transformation(basis_momentum, basis_position, psip0)
@test_approx_eq_eps x0 expect(opp_x, psix0_fft) 0.1
@test_approx_eq p0 expect(opp_p, psix0_fft)
@test_approx_eq_eps p0 expect(opx_p, psip0_fft) 0.1
@test_approx_eq x0 expect(opx_x, psip0_fft)
@test_approx_eq_eps 0. norm(psix0_fft - psip0) 1e-12
@test_approx_eq_eps 0. norm(psip0_fft - psix0) 1e-12
Tpx = particle.FFTOperator(basis_momentum, basis_position)
Txp = particle.FFTOperator(basis_position, basis_momentum)
psix0_fft = Tpx*psix0
psip0_fft = Txp*psip0
@test_approx_eq_eps x0 expect(opp_x, psix0_fft) 0.1
@test_approx_eq p0 expect(opp_p, psix0_fft)
@test_approx_eq_eps p0 expect(opx_p, psip0_fft) 0.1
@test_approx_eq x0 expect(opx_x, psip0_fft)
@test_approx_eq_eps 0. norm(psix0_fft - psip0) 1e-12
@test_approx_eq_eps 0. norm(psip0_fft - psix0) 1e-12
@test_approx_eq_eps 0. norm(dagger(Tpx*psix0) - dagger(psix0)*dagger(Tpx)) 1e-12
@test_approx_eq_eps 0. norm(dagger(Txp*psip0) - dagger(psip0)*dagger(Txp)) 1e-12
psi_ = deepcopy(psip0)
operators.gemv!(Complex(1.), Tpx, psix0, Complex(0.), psi_)
@test_approx_eq_eps 0. norm(psi_ - psip0) 1e-12
operators.gemv!(Complex(1.), Tpx, psix0, Complex(1.), psi_)
@test_approx_eq_eps 0. norm(psi_ - 2*psip0) 1e-12
psi_ = deepcopy(psix0)
operators.gemv!(Complex(1.), Txp, psip0, Complex(0.), psi_)
@test_approx_eq_eps 0. norm(psi_ - psix0) 1e-12
operators.gemv!(Complex(1.), Txp, psip0, Complex(1.), psi_)
@test_approx_eq_eps 0. norm(psi_ - 2*psix0) 1e-12
rhox0x0 = tensor(psix0, dagger(psix0))
rhox0p0 = tensor(psix0, dagger(psip0))
rhop0x0 = tensor(psip0, dagger(psix0))
rhop0p0 = tensor(psip0, dagger(psip0))
rho_ = DenseOperator(basis_momentum, basis_position)
operators.gemm!(Complex(1.), Tpx, rhox0x0, Complex(0.), rho_)
@test_approx_eq_eps 0. tracedistance(rho_, rhop0x0) 1e-5
@test_approx_eq_eps 0. tracedistance(Tpx*rhox0x0, rhop0x0) 1e-5
rho_ = DenseOperator(basis_position, basis_momentum)
operators.gemm!(Complex(1.), rhox0x0, Txp, Complex(0.), rho_)
@test_approx_eq_eps 0. tracedistance(rho_, rhox0p0) 1e-5
@test_approx_eq_eps 0. tracedistance(rhox0x0*Txp, rhox0p0) 1e-5
rho_ = DenseOperator(basis_momentum, basis_momentum)
operators.gemm!(Complex(1.), Tpx, rhox0p0, Complex(0.), rho_)
@test_approx_eq_eps 0. tracedistance(rho_, rhop0p0) 1e-5
@test_approx_eq_eps 0. tracedistance(Tpx*rhox0x0*Txp, rhop0p0) 1e-5
rho_ = DenseOperator(basis_momentum, basis_momentum)
operators.gemm!(Complex(1.), rhop0x0, Txp, Complex(0.), rho_)
@test_approx_eq_eps 0. tracedistance(rho_, rhop0p0) 1e-5
@test_approx_eq_eps 0. tracedistance(Txp*rhop0p0*Tpx, rhox0x0) 1e-5
# Test FFT with lazy product
psi_ = deepcopy(psix0)
operators.gemv!(Complex(1.), LazyProduct(Txp, Tpx), psix0, Complex(0.), psi_)
@test_approx_eq_eps 0. norm(psi_ - psix0) 1e-12
@test_approx_eq_eps 0. norm(Txp*(Tpx*psix0) - psix0) 1e-12
psi_ = deepcopy(psix0)
I = dense_identity(basis_momentum)
operators.gemv!(Complex(1.), LazyProduct(Txp, I, Tpx), psix0, Complex(0.), psi_)
@test_approx_eq_eps 0. norm(psi_ - psix0) 1e-12
@test_approx_eq_eps 0. norm(Txp*I*(Tpx*psix0) - psix0) 1e-12
# Test dense FFT operator
Txp_dense = DenseOperator(Txp)
Tpx_dense = DenseOperator(Tpx)
@test typeof(Txp_dense) == DenseOperator
@test typeof(Tpx_dense) == DenseOperator
@test_approx_eq_eps 0. tracedistance(Txp_dense*rhop0p0*Tpx_dense, rhox0x0) 1e-5
|
proofpile-julia0005-42720 | {
"provenance": "014.jsonl.gz:242721"
} | using Nuklear
using Nuklear.LibNuklear
using Nuklear.GLFWBackend
using GLFW, ModernGL
const WINDOW_WIDTH = 1200
const WINDOW_HEIGHT = 800
const MAX_VERTEX_BUFFER = 512 * 1024
const MAX_ELEMENT_BUFFER = 128 * 1024
@static if Sys.isapple()
const VERSION_MAJOR = 3
const VERSION_MINOR = 3
end
@static if Sys.isapple()
GLFW.WindowHint(GLFW.CONTEXT_VERSION_MAJOR, VERSION_MAJOR)
GLFW.WindowHint(GLFW.CONTEXT_VERSION_MINOR, VERSION_MINOR)
GLFW.WindowHint(GLFW.OPENGL_PROFILE, GLFW.OPENGL_CORE_PROFILE)
GLFW.WindowHint(GLFW.OPENGL_FORWARD_COMPAT, GL_TRUE)
else
GLFW.DefaultWindowHints()
end
# set up GLFW error callback
error_callback(err::GLFW.GLFWError) = @error "GLFW ERROR: code $(err.code) msg: $(err.description)"
GLFW.SetErrorCallback(error_callback)
# create window
win = GLFW.CreateWindow(WINDOW_WIDTH, WINDOW_HEIGHT, "Demo")
@assert win != C_NULL
GLFW.MakeContextCurrent(win)
glViewport(0, 0, WINDOW_WIDTH, WINDOW_HEIGHT)
# init context
ctx = nk_glfw3_init(win, NK_GLFW3_INSTALL_CALLBACKS, MAX_VERTEX_BUFFER, MAX_ELEMENT_BUFFER)
# init font
prefix = joinpath(@__DIR__, "extra_font")
nk_glfw3_font_stash_begin()
roboto = nk_font_atlas_add_from_file(glfw.atlas, joinpath(prefix, "Roboto-Bold.ttf"), 18, C_NULL)
nk_glfw3_font_stash_end()
roboto_handle = nk_get_font_handle(roboto)
nk_style_set_font(ctx, roboto_handle)
# states
const EASY = 0
const HARD = 1
op = EASY
property = Ref{Cint}(20)
bg = nk_colorf(0.10, 0.18, 0.24, 1.0)
while !GLFW.WindowShouldClose(win)
global op
global bg
global ctx
GLFW.PollEvents()
nk_glfw3_new_frame()
if Bool(nk_begin(ctx, "Demo", nk_rect(50, 50, 230, 250),
NK_WINDOW_BORDER|NK_WINDOW_MOVABLE|NK_WINDOW_SCALABLE|
NK_WINDOW_MINIMIZABLE|NK_WINDOW_TITLE))
nk_layout_row_static(ctx, 30, 80, 1)
nk_button_label(ctx, "button") == 1 && @show("button pressed")
nk_layout_row_dynamic(ctx, 30, 2)
Bool(nk_option_label(ctx, "easy", op == EASY)) && (op = EASY;)
Bool(nk_option_label(ctx, "hard", op == HARD)) && (op = HARD;)
nk_layout_row_dynamic(ctx, 25, 1)
nk_property_int(ctx, "Compression:", 0, property, 100, 10, 1)
nk_layout_row_dynamic(ctx, 20, 1)
nk_label(ctx, "background:", NK_TEXT_LEFT)
nk_layout_row_dynamic(ctx, 25, 1)
if Bool(nk_combo_begin_color(ctx, nk_rgb_cf(bg), nk_vec2(nk_widget_width(ctx),400)))
nk_layout_row_dynamic(ctx, 120, 1)
bg = nk_color_picker(ctx, bg, NK_RGBA)
nk_layout_row_dynamic(ctx, 25, 1)
r = nk_propertyf(ctx, "#R:", 0, bg.r, 1.0, 0.01, 0.005)
g = nk_propertyf(ctx, "#G:", 0, bg.g, 1.0, 0.01, 0.005)
b = nk_propertyf(ctx, "#B:", 0, bg.b, 1.0, 0.01, 0.005)
a = nk_propertyf(ctx, "#A:", 0, bg.a, 1.0, 0.01, 0.005)
bg = nk_colorf(r, g, b, a)
nk_combo_end(ctx)
end
end
nk_end(ctx)
# draw
width, height = GLFW.GetWindowSize(win)
glViewport(0, 0, width, height)
glClear(GL_COLOR_BUFFER_BIT)
glClearColor(bg.r, bg.g, bg.b, bg.a)
nk_glfw3_render(NK_ANTI_ALIASING_ON, MAX_VERTEX_BUFFER, MAX_ELEMENT_BUFFER)
GLFW.SwapBuffers(win)
end
nk_glfw3_shutdown()
GLFW.DestroyWindow(win)
|
proofpile-julia0005-42721 | {
"provenance": "014.jsonl.gz:242722"
} | test01_eigvals = [
-0.15883100381194412 - 13.264252860693972im
-0.15883100381194412 + 13.264252860693972im
]
test01_eigvals_psat = [
-0.15883 - 13.2643im
-0.15883 + 13.2643im
]
test02_eigvals = [
-999.9999922288165 + 0.0im
-999.9999824863403 + 0.0im
-5.667091663572526 + 0.0im
-4.939539293199701 - 7.861631594468724im
-4.939539293199701 + 7.861631594468724im
-4.653008913694498 + 0.0im
-4.144476689106577 - 7.59715123723563im
-4.144476689106577 + 7.59715123723563im
-1.3798285681206308 - 16.03637615711796im
-1.3798285681206308 + 16.03637615711796im
-0.9405263132225796 - 13.949752815351614im
-0.9405263132225796 + 13.949752815351614im
-0.4805073944481576 - 0.5379518986751338im
-0.4805073944481576 + 0.5379518986751338im
-0.27347592115017016 - 0.4565372544427486im
-0.27347592115017016 + 0.4565372544427486im
]
test02_eigvals_psat = [
-999.999990000000 + 0.00000000000000im
-999.999980000000 + 0.00000000000000im
-5.67379000000000 + 0.00000000000000im
-4.93393000000000 - 7.85614000000000im
-4.93393000000000 + 7.85614000000000im
-4.64127000000000 + 0.00000000000000im
-4.14297000000000 - 7.59557000000000im
-4.14297000000000 + 7.59557000000000im
-1.36440000000000 - 14.5583400000000im
-1.36440000000000 + 14.5583400000000im
-0.967800000000000 - 12.6558000000000im
-0.967800000000000 + 12.6558000000000im
-0.471260000000000 - 0.579790000000000im
-0.471260000000000 + 0.579790000000000im
-0.283050000000000 - 0.460360000000000im
-0.283050000000000 + 0.460360000000000im
]
test03_eigvals = [
-1000.0000000405296 + 0.0im
-1000.0000000190737 + 0.0im
-56.28815836405992 + 0.0im
-53.12282741877609 + 0.0im
-5.957028010206276 + 0.0im
-5.02141964268554 - 7.898682332162664im
-5.02141964268554 + 7.898682332162664im
-4.8668814493140715 + 0.0im
-4.218933906222015 - 7.617379500840438im
-4.218933906222015 + 7.617379500840438im
-2.117324030780644 + 0.0im
-2.029229441492322 + 0.0im
-1.7025637072900843 - 16.974819568195652im
-1.7025637072900843 + 16.974819568195652im
-1.1756081480320293 - 14.621543797706247im
-1.1756081480320293 + 14.621543797706247im
-0.3806667287614366 - 0.5866699857779792im
-0.3806667287614366 + 0.5866699857779792im
-0.223947575088223 - 0.4971805954842863im
-0.223947575088223 + 0.4971805954842863im
]
test03_eigvals_psat = [
-1000.00000000000 + 0.00000000000000im
-1000.00000000000 + 0.00000000000000im
-56.4167900000000 + 0.00000000000000im
-53.3586000000000 + 0.00000000000000im
-5.95708000000000 + 0.00000000000000im
-5.02325000000000 - 7.89808000000000im
-5.02325000000000 + 7.89808000000000im
-4.82461000000000 + 0.00000000000000im
-4.21943000000000 - 7.61745000000000im
-4.21943000000000 + 7.61745000000000im
-2.12296000000000 + 0.00000000000000im
-2.03127000000000 + 0.00000000000000im
-1.63531000000000 - 15.4006100000000im
-1.63531000000000 + 15.4006100000000im
-1.19151000000000 - 13.1976800000000im
-1.19151000000000 + 13.1976800000000im
-0.351320000000000 - 0.628360000000000im
-0.351320000000000 + 0.628360000000000im
-0.237770000000000 - 0.501610000000000im
-0.237770000000000 + 0.501610000000000im
]
test04_eigvals = [
-1000.0000000605402 + 0.0im
-1000.0000000226693 + 0.0im
-57.04885681143213 - 637.6983559917089im
-57.04885681143213 + 637.6983559917089im
-56.354194606909395 + 0.0im
-53.18155023445317 + 0.0im
-13.892570544392896 - 469.6478362124919im
-13.892570544392896 + 469.6478362124919im
-5.958255480381696 + 0.0im
-5.021533592090426 - 7.898654827966522im
-5.021533592090426 + 7.898654827966522im
-4.868492180624505 + 0.0im
-4.21902585809358 - 7.617326092491787im
-4.21902585809358 + 7.617326092491787im
-2.117386441281952 + 0.0im
-2.029241696160179 + 0.0im
-1.6959817848903833 - 16.973813636829668im
-1.6959817848903833 + 16.973813636829668im
-1.1682442396700994 - 14.616878713067868im
-1.1682442396700994 + 14.616878713067868im
-0.3805794727021142 - 0.586700331651268im
-0.3805794727021142 + 0.586700331651268im
-0.22394101115665865 - 0.49721792171561435im
-0.22394101115665865 + 0.49721792171561435im
]
test05_eigvals = [
-1000.0000037690621 + 0.0im
-1000.000001656506 + 0.0im
-126.28458815493319 + 0.0im
-120.85493880185086 + 0.0im
-46.48594631374737 + 0.0im
-44.373301051079025 + 0.0im
-10.0 + 0.0im
-10.0 + 0.0im
-3.9151560397679868 - 7.4523722134285535im
-3.9151560397679868 + 7.4523722134285535im
-3.836126674242972 - 7.360635983756108im
-3.836126674242972 + 7.360635983756108im
-0.3447395139088272 - 12.01547546088681im
-0.3447395139088272 + 12.01547546088681im
-0.20916948755870068 - 10.607466623279306im
-0.20916948755870068 + 10.607466623279306im
-0.16185759374101516 - 1.055686298197309im
-0.16185759374101516 + 1.055686298197309im
-0.0354521777174975 - 0.684452609868359im
-0.0354521777174975 + 0.684452609868359im
]
test06_eigvals = [
-1000.0000053062265 + 0.0im
-1000.0000018990927 + 0.0im
-126.75458543123136 + 0.0im
-121.57403285596196 + 0.0im
-46.52052665063294 + 0.0im
-44.44016429276653 + 0.0im
-34.69415568906223 - 514.6812519452002im
-34.69415568906223 + 514.6812519452002im
-10.0 + 0.0im
-10.0 + 0.0im
-9.032779692453872 - 426.00371528493883im
-9.032779692453872 + 426.00371528493883im
-3.915125641859613 - 7.452252518206685im
-3.915125641859613 + 7.452252518206685im
-3.8358938132681137 - 7.35974852776193im
-3.8358938132681137 + 7.35974852776193im
-0.3416921618276846 - 12.015383170233138im
-0.3416921618276846 + 12.015383170233138im
-0.2070154019752105 - 10.607728100520474im
-0.2070154019752105 + 10.607728100520474im
-0.16160669596957933 - 1.0557462673615183im
-0.16160669596957933 + 1.0557462673615183im
-0.03542058981778998 - 0.6844527516049774im
-0.03542058981778998 + 0.6844527516049774im
]
test07_eigvals = [
-999.9999914519317 + 0.0im
-999.9999811538516 + 0.0im
-5.027556777415359 + 0.0im
-4.718819382076426 - 7.793357006494638im
-4.718819382076426 + 7.793357006494638im
-4.102660291475314 + 0.0im
-4.052932897118745 - 7.552138242722714im
-4.052932897118745 + 7.552138242722714im
-1.786149751424446 - 14.793097612110634im
-1.786149751424446 + 14.793097612110634im
-1.2196573076881576 - 10.886078373690756im
-1.2196573076881576 + 10.886078373690756im
-0.8706541640279326 - 305.1344520297189im
-0.8706541640279326 + 305.1344520297189im
-0.5028032785077488 - 0.5630005288071646im
-0.5028032785077488 + 0.5630005288071646im
-0.29876013022174563 - 0.4533947142923075im
-0.29876013022174563 + 0.4533947142923075im
-0.263305765310502 - 177.9037730410636im
-0.263305765310502 + 177.9037730410636im
-0.1738090829698704 - 62.759642648170946im
-0.1738090829698704 + 62.759642648170946im
-0.15473139663326663 - 136.7874068069445im
-0.15473139663326663 + 136.7874068069445im
]
test08_eigvals = [
-2285.9589650663725 - 6825.3997886670795im
-2285.9589650663725 + 6825.3997886670795im
-2095.818014258244 - 6533.140681149019im
-2095.818014258244 + 6533.140681149019im
-1595.4562252195476 - 233.8877642497275im
-1595.4562252195476 + 233.8877642497275im
-986.7365548177571 + 0.0im
-500.00000000000034 + 0.0im
-471.8999089077167 + 0.0im
-220.89110522830256 + 0.0im
-50.32121120551376 + 0.0im
-50.245608339393456 + 0.0im
-34.244848184557476 - 260.53071319880445im
-34.244848184557476 + 260.53071319880445im
-11.303322380605753 + 0.0im
-11.19036178279552 + 0.0im
-7.691789694217273 - 28.802262759948757im
-7.691789694217273 + 28.802262759948757im
-4.513698394541721 + 0.0im
]
test09_eigvals = [
-2266.7564379695177 - 6842.581227177472im
-2266.7564379695177 + 6842.581227177472im
-2102.099837452439 - 6516.907948869909im
-2102.099837452439 + 6516.907948869909im
-1603.381787508888 - 299.73078962017183im
-1603.381787508888 + 299.73078962017183im
-999.9999853200115 + 0.0im
-981.5890587115106 + 0.0im
-499.9999999999994 + 0.0im
-472.94184918788915 + 0.0im
-222.99856689050083 + 0.0im
-58.916888833164734 - 297.3751281905745im
-58.916888833164734 + 297.3751281905745im
-50.391739774950395 + 0.0im
-50.28496360614687 + 0.0im
-11.312374832548043 + 0.0im
-11.192736801973947 + 0.0im
-6.304389558457761 - 25.99314750112348im
-6.304389558457761 + 25.99314750112348im
-4.936441589468676 - 7.860345016545073im
-4.936441589468676 + 7.860345016545073im
-4.874648728527242 + 0.0im
-4.628324873366437 + 0.0im
-1.3057837545018711 - 14.985883650327404im
-1.3057837545018711 + 14.985883650327404im
-0.522548405283088 - 0.5356190189108261im
-0.522548405283088 + 0.5356190189108261im
]
test10_eigvals = [
-2272.361404997375 - 6851.776097141858im
-2272.361404997375 + 6851.776097141858im
-2124.253409194327 - 6511.932840117861im
-2124.253409194327 + 6511.932840117861im
-1575.608572024949 - 348.98204359994224im
-1575.608572024949 + 348.98204359994224im
-999.9999861307571 + 0.0im
-960.833224373504 + 0.0im
-499.9999999999994 + 0.0im
-477.8792003131099 + 0.0im
-218.19978493862791 + 0.0im
-75.81020867999811 - 305.1528459259956im
-75.81020867999811 + 305.1528459259956im
-50.41186110201852 + 0.0im
-50.352254884885475 + 0.0im
-11.32379102275944 + 0.0im
-11.22342662568646 + 0.0im
-7.581779910900623 - 27.826786728047516im
-7.581779910900623 + 27.826786728047516im
-5.484429063458374 + 0.0im
-3.9465322340765994 - 7.508941380094648im
-3.9465322340765994 + 7.508941380094648im
-2.907232875387719 + 0.0im
-0.7994143393863338 - 14.039239192684892im
-0.7994143393863338 + 14.039239192684892im
-0.13279249045498032 - 0.4749686774987308im
-0.13279249045498032 + 0.4749686774987308im
]
test11_eigvals = [
-2280.2076336006976 - 6841.438512443791im
-2280.2076336006976 + 6841.438512443791im
-1997.9475701466922 - 6457.618671478096im
-1997.9475701466922 + 6457.618671478096im
-1538.5966307902208 - 345.7913984886787im
-1538.5966307902208 + 345.7913984886787im
-1250.2936918022115 - 9608.999772834375im
-1250.2936918022115 + 9608.999772834375im
-999.9999874645969 + 0.0im
-956.6099185810597 + 0.0im
-863.7185028125181 - 6165.596120417795im
-863.7185028125181 + 6165.596120417795im
-500.0 + 0.0im
-478.235862371146 + 0.0im
-218.28706882452354 + 0.0im
-58.31526552633564 - 299.61738004327435im
-58.31526552633564 + 299.61738004327435im
-50.414620572174904 + 0.0im
-50.35016783858917 + 0.0im
-42.437976630212574 - 467.0560264577345im
-42.437976630212574 + 467.0560264577345im
-11.323801014242834 + 0.0im
-11.223449045909994 + 0.0im
-7.577419561243316 - 27.833300571016597im
-7.577419561243316 + 27.833300571016597im
-5.483728552089268 + 0.0im
-3.9464440004154135 - 7.508758038269464im
-3.9464440004154135 + 7.508758038269464im
-2.9069124155527093 + 0.0im
-0.8024184353347599 - 14.03943882277388im
-0.8024184353347599 + 14.03943882277388im
-0.13278021568161175 - 0.4749742367984249im
-0.13278021568161175 + 0.4749742367984249im
]
test12_eigvals = [
-0.8055565836807297 - 0.5525321844563806im,
-0.8055565836807297 + 0.5525321844563806im,
-0.5562604430028794 - 13.482773238020515im,
-0.5562604430028794 + 13.482773238020515im,
0.0 + 0.0im,
]
test13_eigvals = [
-10459.358851638417 + 0.0im
-9518.115670592897 + 0.0im
-187.23828129745846 + 0.0im
-10.106065550045505 + 0.0im
-5.224596052187488 + 0.0im
-4.501324080186281 + 0.0im
-2.2096561921652595 + 0.0im
-1.364291810141959 - 16.041013087225934im
-1.364291810141959 + 16.041013087225934im
-0.9689898367154597 - 0.5406190107493515im
-0.9689898367154597 + 0.5406190107493515im
-0.912054975024942 - 14.019102730055716im
-0.912054975024942 + 14.019102730055716im
-0.7804036299927885 - 3.4337139420134424im
-0.7804036299927885 + 3.4337139420134424im
-0.01997649074097721 + 0.0im
]
test14_eigvals = [
-2211.5592709440352 - 6867.397622238308im,
-2211.5592709440352 + 6867.397622238308im,
-2153.3431924567208 - 6491.972141691999im,
-2153.3431924567208 + 6491.972141691999im,
-1462.9335729586219 - 695.7972901292562im,
-1462.9335729586219 + 695.7972901292562im,
-970.0160993678469 + 0.0im,
-500.00000000000045 + 0.0im,
-490.8250468494144 + 0.0im,
-202.2656322427033 - 420.17747462821575im,
-202.2656322427033 + 420.17747462821575im,
-189.55369688202828 + 0.0im,
-50.54441602439222 + 0.0im,
-50.45006132952855 + 0.0im,
-17.63867329949132 - 46.366884762058845im,
-17.63867329949132 + 46.366884762058845im,
-11.355851160484914 + 0.0im,
-11.335928224977632 + 0.0im,
-1.4563561997747976 - 8.058315342439121im,
-1.4563561997747976 + 8.058315342439121im,
-0.5056731355961069 + 0.0im,
0.0 + 0.0im,
]
test15_eigvals = [
-39.22517135042766 - 0.23707459763811717im
-39.22517135042766 + 0.23707459763811717im
-8.162038320970622 + 0.0im
-0.6894464243695174 - 8.596453673443301im
-0.6894464243695174 + 8.596453673443301im
-0.20829262548711688 + 0.0im
]
test15_eigvals_no_sat = [
-41.28060502979358 + 0.0im
-38.77124317334057 + 0.0im
-6.080781526447125 + 0.0im
-0.6716198831075708 - 8.713016722797422im
-0.6716198831075708 + 8.713016722797422im
-0.12135746559823009 + 0.0im
]
test15_eigvals_high_sat = [
-38.90919918657826 - 0.5990898792027552im
-38.90919918657826 + 0.5990898792027552im
-8.716992122605445 + 0.0im
-0.7802024926568868 - 8.344701265008863im
-0.7802024926568868 + 8.344701265008863im
-0.34108463674219724 + 0.0im
]
test16_eigvals = [
-40.86489059167709 + 0.0im
-38.79830220459659 + 0.0im
-6.574706325237592 + 0.0im
-0.6823066948004968 - 8.675007074248816im
-0.6823066948004968 + 8.675007074248816im
-0.14426163605639922 + 0.0im
]
test16_eigvals_high_sat = [
-40.47662880667318 + 0.0im
-38.83801972051611 + 0.0im
-7.051748409012963 + 0.0im
-0.6774433789575647 - 8.665077736802273im
-0.6774433789575647 + 8.665077736802273im
-0.15891752736421258 + 0.0im
]
test17_eigvals = [
-999.9999972744532 + 0.0im
-41.280911913534204 + 0.0im
-38.76959077602596 + 0.0im
-6.091730762853858 + 0.0im
-4.397466628592957 - 7.701876240796052im
-4.397466628592957 + 7.701876240796052im
-0.667225952071195 - 8.709471358908846im
-0.667225952071195 + 8.709471358908846im
-0.19039981492553382 - 0.3625458335906406im
-0.19039981492553382 + 0.3625458335906406im
]
test18_eigvals = [
-28.693211883382347 + 0.0im
-9.84238486492247 + 0.0im
-1.7031770286738355 - 8.629174022229318im
-1.7031770286738355 + 8.629174022229318im
-0.7640548534736492 + 0.0im
]
test19_eigvals = [
-28.76860514709647 + 0.0im
-11.070928271739678 + 0.0im
-1.665595867723869 - 8.572069631777202im
-1.665595867723869 + 8.572069631777202im
-0.4928568982924553 + 0.0im
]
test20_eigvals = [
-100.00021842764602 + 0.0im
-39.23707786473363 - 0.30958964511252446im
-39.23707786473363 + 0.30958964511252446im
-22.649793103911897 + 0.0im
-8.543255967128875 + 0.0im
-4.268315666497509 - 5.271829751631857im
-4.268315666497509 + 5.271829751631857im
-0.6848864366573055 - 8.58151201322762im
-0.6848864366573055 + 8.58151201322762im
-0.3129633046035981 - 0.5454321262659743im
-0.3129633046035981 + 0.5454321262659743im
]
test20_eigvals_sat = [
-100.00021924248418 + 0.0im
-39.237422024473375 - 0.3111853351055383im
-39.237422024473375 + 0.3111853351055383im
-23.53165375119335 + 0.0im
-8.410026826125367 + 0.0im
-4.083256130308611 - 6.7290588685710615im
-4.083256130308611 + 6.7290588685710615im
-0.6809735490286486 - 8.581164994672914im
-0.6809735490286486 + 8.581164994672914im
-0.2887994589525554 - 0.4637034811911777im
-0.2887994589525554 + 0.4637034811911777im
]
test21_eigvals = [
-100.00020053262892 + 0.0im
-41.279576521715754 + 0.0im
-38.7876974005338 + 0.0im
-22.69484347984886 + 0.0im
-7.165287493953102 + 0.0im
-6.28758745738531 + 0.0im
-4.319362829032215 - 5.205827008438781im
-4.319362829032215 + 5.205827008438781im
-1.6673562530234518 - 1.8236637052912814im
-1.6673562530234518 + 1.8236637052912814im
-0.7102160509747345 - 8.694108632428092im
-0.7102160509747345 + 8.694108632428092im
-0.2846615017660007 - 0.607222973124181im
-0.2846615017660007 + 0.607222973124181im
]
test22_eigvals = [
-100.00020053242672 + 0.0im
-41.2798088140291 + 0.0im
-38.788275874605674 + 0.0im
-22.69485159080186 + 0.0im
-6.302325814485422 + 0.0im
-4.931267158171579 + 0.0im
-4.31725705837116 - 5.200834867430221im
-4.31725705837116 + 5.200834867430221im
-1.2329330244784797 + 0.0im
-0.7463112762985753 - 8.85884300731678im
-0.7463112762985753 + 8.85884300731678im
-0.28579333866010825 - 0.6025484098882521im
-0.28579333866010825 + 0.6025484098882521im
]
test23_eigvals = [
-2285.984972487398 - 6825.409970531314im
-2285.984972487398 + 6825.409970531314im
-2095.8292998741636 - 6533.139948007082im
-2095.8292998741636 + 6533.139948007082im
-1596.153193009681 - 233.6009166562695im
-1596.153193009681 + 233.6009166562695im
-986.970453826522 + 0.0im
-50.61566828819503 + 0.0im
-50.24422063249634 + 0.0im
-33.8405217631388 - 255.4659930547156im
-33.8405217631388 + 255.4659930547156im
-16.35252074296158 - 35.19268456048173im
-16.35252074296158 + 35.19268456048173im
-11.402718862758675 + 0.0im
-11.30330507373553 + 0.0im
]
test24_eigvals = [
-16167.452158969038 - 8.043416230174378im
-16167.452158969038 + 8.043416230174378im
-306.82201246319016 - 5867.887293375189im
-306.82201246319016 + 5867.887293375189im
-258.57819319671626 - 4795.264357598769im
-258.57819319671626 + 4795.264357598769im
-202.1765718936583 - 520.5322326644553im
-202.1765718936583 + 520.5322326644553im
-26.89823743203937 - 74.8261131632425im
-26.89823743203937 + 74.8261131632425im
-14.146571409094378 - 5.185851435997753im
-14.146571409094378 + 5.185851435997753im
-10.131535087046432 + 0.0im
-1.8914933786938954 - 0.005547477442692941im
-1.8914933786938954 + 0.005547477442692941im
]
test26_eigvals = [
-39.22608968461495 - 0.24435922038292715im
-39.22608968461495 + 0.24435922038292715im
-8.153259227989246 + 0.0im
-0.9387139673515449 + 0.0im
-0.6886612978044744 - 8.596257367969528im
-0.6886612978044744 + 8.596257367969528im
-0.2390456681797083 - 0.17819776543201055im
-0.2390456681797083 + 0.17819776543201055im
]
test26_eigvals_noTE = [
-39.47393476296245 + 0.0im
-38.90941405619594 + 0.0im
-8.225656744581503 + 0.0im
-0.6876080511263236 - 8.603146074166037im
-0.6876080511263236 + 8.603146074166037im
-0.24592182992188777 - 0.14919699127432942im
-0.24592182992188777 + 0.14919699127432942im
]
test29_eigvals = [
-59.7320535837085 + 0.0im
-50.00000000000001 + 0.0im
-50.0 + 0.0im
-50.0 + 0.0im
-45.133973208145804 - 8.692497819063023im
-45.133973208145804 + 8.692497819063023im
-20.0 + 0.0im
-5.0 + 0.0im
0.0 + 0.0im
]
test29_eigvals_fflag = [
-59.772220372205 + 0.0im
-51.0447940895546 - 7.0658041406756436im
-51.0447940895546 + 7.0658041406756436im
-50.0 + 0.0im
-45.18265590251619 - 8.61499070758468im
-45.18265590251619 + 8.61499070758468im
-20.0 + 0.0im
-5.873878451784012 - 6.347167941328094im
-5.873878451784012 + 6.347167941328094im
-5.0 + 0.0im
-0.02512274008534303 + 0.0im
0.0 + 0.0im
]
test29_eigvals_Rflag = [
-76.91753318136166 - 32.2358556345301im
-76.91753318136166 + 32.2358556345301im
-50.0 + 0.0im
-49.999999999999986 - 9.959757193250047e-8im
-49.999999999999986 + 9.959757193250047e-8im
-7.881077881909685 - 31.52570878189592im
-7.881077881909685 + 31.52570878189592im
-5.0 + 0.0im
-0.40277787345725297 + 0.0im
0.0 + 0.0im
]
test29_eigvals_Rflag_no_Vflag = [
-76.91753318136163 - 32.235855634530125im
-76.91753318136163 + 32.235855634530125im
-50.0 + 0.0im
-49.999999999999986 - 2.669704642995615e-7im
-49.999999999999986 + 2.669704642995615e-7im
-7.8810778819096985 - 31.525708781895904im
-7.8810778819096985 + 31.525708781895904im
-5.0 + 0.0im
-0.4027778734572379 + 0.0im
]
test29_eigvals_Rflag_Qflag = [
-50.00000000000023 + 0.0im
-50.0 + 0.0im
-49.99977208639873 + 0.0im
-48.583188396862354 - 43.26816561002791im
-48.583188396862354 + 43.26816561002791im
-19.851972845705998 + 0.0im
-5.0 + 0.0im
-2.616189310673934 + 0.0im
-0.1828444817481023 - 0.361230702596121im
-0.1828444817481023 + 0.361230702596121im
]
test30_eigvals_with_filt = [
-76.92307692307699 + 0.0im
-49.99999999999998 + 0.0im
-39.22608968461492 - 0.24435922038297828im
-39.22608968461492 + 0.24435922038297828im
-8.15325922798925 + 0.0im
-1.0000000000000009 + 0.0im
-0.938713967351545 + 0.0im
-0.6886612978044739 - 8.596257367969525im
-0.6886612978044739 + 8.596257367969525im
-0.6060606060606061 + 0.0im
-0.23904566817970954 - 0.17819776543200883im
-0.23904566817970954 + 0.17819776543200883im
0.0 + 0.0im
]
test30_eigvals_no_filt = [
-50.0 + 0.0im
-39.226089684614934 - 0.24435922038295738im
-39.226089684614934 + 0.24435922038295738im
-8.153259227989247 + 0.0im
-0.9387139673515453 + 0.0im
-0.688661297804473 - 8.59625736796952im
-0.688661297804473 + 8.59625736796952im
-0.6060606060606061 + 0.0im
-0.2390456681797072 - 0.17819776543201066im
-0.2390456681797072 + 0.17819776543201066im
0.0 + 0.0im
0.0 + 0.0im
]
test31_eigvals = [
-41.28028496346849 + 0.0im
-38.772141289217586 + 0.0im
-6.0734966129815335 + 0.0im
-3.2959209145486295 + 0.0im
-0.9334113090863949 + 0.0im
-0.714334242846485 - 8.712902325176582im
-0.714334242846485 + 8.712902325176582im
-0.196671279155743 - 0.19929469863615853im
-0.196671279155743 + 0.19929469863615853im
-0.1790842443333912 + 0.0im
-0.0491682970453784 + 0.0im
-0.03336518379541009 + 0.0im
]
test33_eigvals_constantP = [
-39.45761150626904 + 0.0im
-38.80018214770829 + 0.0im
-7.781678192463298 + 0.0im
-0.9355834221192385 - 8.472947434030154im
-0.9355834221192385 + 8.472947434030154im
-0.535017710389672 + 0.0im
]
test33_eigvals_constantI = [
-39.154101609632264 - 0.30688836000778646im
-39.154101609632264 + 0.30688836000778646im
-7.780866011433047 + 0.0im
-0.9320771142348544 - 8.476045052644412im
-0.9320771142348544 + 8.476045052644412im
-0.536696069017935 + 0.0im
]
test33_eigvals_constantZ = [
-39.17600477596733 - 0.5179363938174002im
-39.17600477596733 + 0.5179363938174002im
-7.780182413917664 + 0.0im
-0.9290443071358351 - 8.478692979297731im
-0.9290443071358351 + 8.478692979297731im
-0.5381468520266435 + 0.0im
]
test37_eigvals = [
-29.354494961308443 - 928.1009201162458im
-29.354494961308443 + 928.1009201162458im
-21.358584482366716 + 0.0im
-9.446872682532018 + 0.0im
-5.415417142234835 + 0.0im
-3.5158065492012964 - 15.750279074009407im
-3.5158065492012964 + 15.750279074009407im
-3.405242872115318 - 2.228869205181562im
-3.405242872115318 + 2.228869205181562im
-3.1776970108237883 + 0.0im
-1.0 + 0.0im
-0.39349870582862 - 8.82930733740199im
-0.39349870582862 + 8.82930733740199im
-0.07411941090418851 + 0.0im
-0.001992211534244684 + 0.0im
-0.001001001001001001 + 0.0im
]
test39_eigvals = [
-100.45338295445788 + 0.0im
-21.35498851941608 + 0.0im
-10.0 + 0.0im
-5.463171684799997 + 0.0im
-4.91399966284345 - 4.485021668563468im
-4.91399966284345 + 4.485021668563468im
-3.149582560233519 + 0.0im
-0.34584314540905314 - 9.434904079322248im
-0.34584314540905314 + 9.434904079322248im
-0.09719979123415864 + 0.0im
-0.07412657316639956 + 0.0im
-0.002014246353738706 + 0.0im
-0.001001001001001001 + 0.0im
]
test40_eigvals = [
-99.94136467100992 + 0.0im
-21.355217306408555 + 0.0im
-11.231106548123972 - 9.575595271086215im
-11.231106548123972 + 9.575595271086215im
-5.4404844263731915 + 0.0im
-3.157024438013972 + 0.0im
-1.4915618476418233 - 4.836401833717598im
-1.4915618476418233 + 4.836401833717598im
-1.0 + 0.0im
-0.5592551382672033 - 9.485811647183036im
-0.5592551382672033 + 9.485811647183036im
-0.07411245925908164 + 0.0im
-0.0020142681327209597 + 0.0im
-0.001001001001001001 + 0.0im
]
|
proofpile-julia0005-42722 | {
"provenance": "014.jsonl.gz:242723"
} | module M
export f
f(x) = x
g(x = 1, y = 2, z = 3; kwargs...) = x
h(x::Int, y::Int = 2, z::Int = 3; kwargs...) = x
const A{T} = Union{Vector{T}, Matrix{T}}
h_1(x::A) = x
h_2(x::A{Int}) = x
h_3(x::A{T}) where {T} = x
i_1(x; y = x) = x * y
i_2(x::Int; y = x) = x * y
i_3(x::T; y = x) where {T} = x * y
i_4(x; y::T = zero(T), z::U = zero(U)) where {T, U} = x + y + z
j_1(x, y) = x * y # two arguments, no keyword arguments
j_1(x; y = x) = x * y # one argument, one keyword argument
mutable struct T
a
b
c
end
struct K
K(; a = 1) = new()
end
abstract type AbstractType <: Integer end
struct CustomType{S, T <: Integer} <: Integer
end
primitive type BitType8 8 end
primitive type BitType32 <: Real 32 end
end
|
proofpile-julia0005-42723 | {
"provenance": "014.jsonl.gz:242724"
} | export CS_model
"""
CS_model(df, y, grouping, d, link)
Form the compound symmetric (CS) model for intercept only regression with the specified base distribution (d) and link function (link).
# Arguments
- `df`: A named `DataFrame`
- `y`: Outcome variable name of interest, specified as a `Symbol`.
This variable name must be present in `df`.
- `grouping`: Grouping or Clustering variable name of interest, specified as a `Symbol`.
This variable name must be present in `df`.
- `d`: Base `Distribution` of outcome from `Distributions.jl`.
- `link`: Canonical `Link` function of the base distribution specified in `d`, from `GLM.jl`.
# Optional Arguments
- `penalized`: Boolean to specify whether or not to add an L2 Ridge penalty on the variance parameter for the CS structured covariance.
One can turn this option on by specifying `penalized = true` to add this penalty for numerical stability. (default `penalized = false`).
"""
function CS_model(
df::DataFrame,
y::Symbol,
grouping::Symbol,
d::D,
link::Link;
penalized::Bool = false) where {D<:Normal, Link}
groups = unique(df[!, grouping])
n = length(groups)
gcs = Vector{GaussianCopulaCSObs{Float64}}(undef, n)
for (i, grp) in enumerate(groups)
gidx = df[!, grouping] .== grp
di = count(gidx)
Y = Float64.(df[gidx, y])
X = ones(di, 1)
gcs[i] = GaussianCopulaCSObs(Y, X)
end
gcm = GaussianCopulaCSModel(gcs; penalized = penalized);
return gcm
end
function CS_model(
df::DataFrame,
y::Symbol,
grouping::Symbol,
d::D,
link::Link;
penalized::Bool = false) where {D<:Union{Poisson, Bernoulli}, Link}
groups = unique(df[!, grouping])
n = length(groups)
gcs = Vector{GLMCopulaCSObs{Float64, D, Link}}(undef, n)
for (i, grp) in enumerate(groups)
gidx = df[!, grouping] .== grp
di = count(gidx)
Y = Float64.(df[gidx, y])
X = ones(di, 1)
gcs[i] = GLMCopulaCSObs(Y, X, d, link)
end
gcm = GLMCopulaCSModel(gcs; penalized = penalized);
return gcm
end
function CS_model(
df::DataFrame,
y::Symbol,
grouping::Symbol,
d::D,
link::Link;
penalized::Bool = false) where {D<:NegativeBinomial, Link}
groups = unique(df[!, grouping])
n = length(groups)
gcs = Vector{NBCopulaCSObs{Float64, D, Link}}(undef, n)
for (i, grp) in enumerate(groups)
gidx = df[!, grouping] .== grp
di = count(gidx)
Y = Float64.(df[gidx, y])
X = ones(di, 1)
gcs[i] = NBCopulaCSObs(Y, X, d, link)
end
gcm = NBCopulaCSModel(gcs; penalized = penalized);
return gcm
end
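# Hypothetical usage sketch for the intercept-only methods above (not part of the
# package's own examples). The column names `:y` and `:group` and the simulated
# data are assumptions; uncomment to fit a Poisson CS model on a long-format DataFrame.
# using DataFrames, Distributions, GLM
# df = DataFrame(y = rand(Poisson(2.0), 100), group = repeat(1:20, inner = 5))
# gcm = CS_model(df, :y, :group, Poisson(), LogLink())   # intercept-only GLMCopulaCSModel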
"""
CS_model(df, y, grouping, covariates, d, link; penalized = penalized)
Form the compound symmetric (CS) model for regression with the specified base distribution (d) and link function (link).
# Arguments
- `df`: A named `DataFrame`
- `y`: Outcome variable name of interest, specified as a `Symbol`.
This variable name must be present in `df`.
- `grouping`: Grouping or Clustering variable name of interest, specified as a `Symbol`.
This variable name must be present in `df`.
- `covariates`: Covariate names of interest as a vector of `Symbol`s.
Each variable name must be present in `df`.
- `d`: Base `Distribution` of outcome from `Distributions.jl`.
- `link`: Canonical `Link` function of the base distribution specified in `d`, from `GLM.jl`.
# Optional Arguments
- `penalized`: Boolean to specify whether or not to add an L2 Ridge penalty on the variance parameter for the CS structured covariance.
One can turn this option on by specifying `penalized = true` to add this penalty for numerical stability. (default `penalized = false`).
"""
function CS_model(
df::DataFrame,
y::Symbol,
grouping::Symbol,
covariates::Vector{Symbol},
d::D,
link::Link;
penalized::Bool = false) where {D<:Normal, Link}
groups = unique(df[!, grouping])
n = length(groups)
gcs = Vector{GaussianCopulaCSObs{Float64}}(undef, n)
for (i, grp) in enumerate(groups)
gidx = df[!, grouping] .== grp
di = count(gidx)
Y = Float64.(df[gidx, y])
X = ones(di, 1)
@inbounds for i in 1:length(covariates)
U = Float64.(df[gidx, covariates[i]])
X = hcat(X, U)
end
gcs[i] = GaussianCopulaCSObs(Y, X)
end
gcm = GaussianCopulaCSModel(gcs; penalized = penalized);
return gcm
end
function CS_model(
df::DataFrame,
y::Symbol,
grouping::Symbol,
covariates::Vector{Symbol},
d::D,
link::Link;
penalized::Bool = false) where {D<:Union{Poisson, Bernoulli}, Link}
groups = unique(df[!, grouping])
n = length(groups)
gcs = Vector{GLMCopulaCSObs{Float64, D, Link}}(undef, n)
for (i, grp) in enumerate(groups)
gidx = df[!, grouping] .== grp
di = count(gidx)
Y = Float64.(df[gidx, y])
X = ones(di, 1)
@inbounds for i in 1:length(covariates)
U = Float64.(df[gidx, covariates[i]])
X = hcat(X, U)
end
gcs[i] = GLMCopulaCSObs(Y, X, d, link)
end
gcm = GLMCopulaCSModel(gcs; penalized = penalized);
return gcm
end
function CS_model(
df::DataFrame,
y::Symbol,
grouping::Symbol,
covariates::Vector{Symbol},
d::D,
link::Link;
penalized::Bool = false) where {D<:NegativeBinomial, Link}
groups = unique(df[!, grouping])
n = length(groups)
gcs = Vector{NBCopulaCSObs{Float64, D, Link}}(undef, n)
for (i, grp) in enumerate(groups)
gidx = df[!, grouping] .== grp
di = count(gidx)
Y = Float64.(df[gidx, y])
X = ones(di, 1)
@inbounds for i in 1:length(covariates)
U = Float64.(df[gidx, covariates[i]])
X = hcat(X, U)
end
gcs[i] = NBCopulaCSObs(Y, X, d, link)
end
gcm = NBCopulaCSModel(gcs; penalized = penalized);
return gcm
end
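# Hypothetical usage sketch for the covariate methods above; the column names and
# simulated predictors below are illustrative assumptions, not package defaults.
# using DataFrames, Distributions, GLM
# df = DataFrame(y = randn(100), x1 = randn(100), x2 = randn(100),
#                group = repeat(1:20, inner = 5))
# gcm = CS_model(df, :y, :group, [:x1, :x2], Normal(), IdentityLink())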
|
proofpile-julia0005-42724 | {
"provenance": "014.jsonl.gz:242725"
} | include("../src/AsymptoticBoltzmann.jl")
using Plots
function mx_scan(mxs, mv, gvxx, eps)
rds = Array{Float64,2}(undef, length(mxs), 3)
Threads.@threads for i = 1:length(mxs)
mx = mxs[i]
model = AsymptoticBoltzmann.KineticMixing(mx, mv, gvxx, eps)
try
rds[i, 1] = AsymptoticBoltzmann.relic_density(
model,
AsymptoticBoltzmann.ODE(
xstart = 1,
xend = 5e4,
reltol = 1e-14,
abstol = 2e-19,
),
)
catch
rds[i, 1] = NaN
end
try
rds[i, 2] = AsymptoticBoltzmann.relic_density(
model,
AsymptoticBoltzmann.MPU(),
)
catch
rds[i, 2] = NaN
end
try
if 2mx < mv
α = max((mv - 2mx) / mx, 0.0)
else
α = 2 * (mv - mx) / mx
end
rds[i, 3] = AsymptoticBoltzmann.relic_density(
model,
AsymptoticBoltzmann.MPU(α = max(α, 0.0)),
)
catch
rds[i, 3] = NaN
end
end
rds
end
rds = begin
MV = 1e3
EPS = 1e-3
GVXX = 1.0
rs = 10 .^ LinRange(log10(0.1), log10(1.2), 500)
mxs = rs .* MV
mx_scan(mxs, MV, GVXX, EPS)
end
xfs = begin
MV = 1e3
EPS = 1e-3
GVXX = 1.0
rs = 10 .^ LinRange(log10(0.1), log10(1.2), 500)
mxs = rs .* MV
_xfs = zeros(eltype(mxs), length(mxs))
for (i, mx) in enumerate(mxs)
model = AsymptoticBoltzmann.KineticMixing(mx, MV, GVXX, EPS)
_xfs[i] = AsymptoticBoltzmann.compute_xf(model)
end
_xfs
end
tcs = begin
MV = 1e3
EPS = 1e-3
GVXX = 1.0
MX = MV / 2 * 1.1
model = AsymptoticBoltzmann.KineticMixing(MX, MV, GVXX, EPS)
xs = 10 .^ LinRange(-1.0, 2.0, 500)
_tcs = zeros(eltype(mxs), length(mxs))
for (i,x) in enumerate(xs)
_tcs[i] = AsymptoticBoltzmann.dm_thermal_cross_section(x,model)
end
_tcs
end
cs = begin
MV = 1e3
EPS = 1e-3
GVXX = 1.0
MX = 100.0
model = AsymptoticBoltzmann.KineticMixing(MX, MV, GVXX, EPS)
Qs = 10 .^ LinRange(log10(2.001*MX), log10(50*MX), 500)
_cs = zeros(eltype(mxs), length(mxs))
for (i,Q) in enumerate(Qs)
_cs[i] = AsymptoticBoltzmann.dm_annihilation_cross_section(Q,model)
end
_cs
end
log.(rds[:, 1])
plot(rs, abs.(rds[:, 1] - rds[:, 2]) ./ rds[:, 1], yaxis = :log)
plot!(rs, abs.(rds[:, 1] - rds[:, 3]) ./ rds[:, 1], yaxis = :log)
plot(rs, log.(rds[:, 1]))
@show log.(rds[:, 1])
@show cs
|
proofpile-julia0005-42725 | {
"provenance": "014.jsonl.gz:242726"
} | function build_adiabatic_frame_evolution(hp, stage; polaron=false)
ilvl = init_lvl(hp)
gap_lvl = ilvl == 1 ? 1 : 2
s_axis = range(0, 1, length=1001)
H = build_H(hp, stage, polaron=polaron)
# calculate the position of crossing
sm, = min_gap(H, gap_lvl, gap_lvl + 1)
dH = build_dH(hp, stage, polaron=polaron)
coupling = build_couplings(polaron=polaron)
if stage == 1
inds = sm < 0.5 ? findlast((x) -> x < sm, s_axis) : nothing
proj = project_to_lowlevel(H, s_axis, coupling, dH, lvl=5, direction=:backward)
adiabatic_H = build_adiabatic_H(proj)
couplings = interp_couplings(proj, inds, sm, polaron, stage)
elseif stage == 2
w, v = eigen_decomp(H, 0.0, lvl=5)
adiabatic_H = AdiabaticFrameHamiltonian((s) -> w[1:4], nothing)
couplings = rotate_couplings(coupling, v[:, 1:4], polaron)
elseif stage == 3
inds = sm > 0.5 ? findlast((x) -> x < sm, s_axis) : nothing
proj = project_to_lowlevel(H, s_axis, coupling, dH, lvl=5)
adiabatic_H = build_adiabatic_H(proj)
couplings = interp_couplings(proj, inds, sm, polaron, stage)
else
throw(ArgumentError("Stage $stage not defined."))
end
cb = build_diabatic_callback(sm, gap_lvl, stage)
adiabatic_H, couplings, cb
end
function build_diabatic_callback(sm, swap_lvl, stage)
if stage == 1
if sm < 0.5
op = swap_operator(swap_lvl)
cb = InstPulseCallback([2 * τ1 * sm], (c, i) -> c .= op * c * op')
else
cb = nothing
end
elseif stage == 2
cb = nothing
elseif stage == 3
if sm > 0.5
op = swap_operator(swap_lvl)
cb = InstPulseCallback([2 * τ1 * sm], (c, i) -> c .= op * c * op')
else
cb = nothing
end
else
throw(ArgumentError("Stage $stage not defined."))
end
cb
end
function build_adiabatic_H(proj)
D = construct_interpolations(proj.s, hcat(proj.ev...)[1:4, :], order=2)
Dfun(s) = [D(i, s) for i in 1:4]
AdiabaticFrameHamiltonian(Dfun, nothing)
end
function min_gap(H, i=1, j=2)
gap = function (s)
w, = eigen_decomp(H, s, lvl=4)
w[j] - w[i]
end
res1 = optimize(gap, 0, 0.5)
res2 = optimize(gap, 0.5, 1)
v, ind = findmin([Optim.minimum(res1), Optim.minimum(res2)])
Optim.minimizer([res1, res2][ind]), v
end
function swap_operator(swap_lvl)
a1 = zeros(4)
a2 = zeros(4)
a1[swap_lvl] = 1
a2[swap_lvl + 1] = 1
op = a1 * a2' + a2 * a1'
resi = [i for i = 1:4 if !(i in [swap_lvl, swap_lvl + 1])]
for i in resi
op[i, i] = 1
end
op
end |
proofpile-julia0005-42726 | {
"provenance": "014.jsonl.gz:242727"
} | type RandomSelectionDefinition <: SelectionDefinition; end
type RandomSelection <: Selection; end
"""
Composes a random selection operator from its definition.
"""
compose!(d::RandomSelectionDefinition) = RandomSelection()
"""
Random selection operates by selecting individuals at random with
replacement.
"""
random() = RandomSelectionDefinition()
select_ids{F}(::RandomSelection, ::FitnessScheme, candidates::Vector{Tuple{Int, F}}, n::Int) =
  rand(1:length(candidates), n)
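# Hypothetical usage sketch (assumes a concrete FitnessScheme instance and scored
# candidates supplied by the surrounding framework; `scheme` is a placeholder name).
# sel = compose!(random())                                   # -> RandomSelection()
# ids = select_ids(sel, scheme, [(1, 0.4), (2, 0.9), (3, 0.1)], 2)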
|
proofpile-julia0005-42727 | {
"provenance": "014.jsonl.gz:242728"
} | import CLBLAS
import OpenCL
const clblas = CLBLAS
clblas.setup()
const cl = OpenCL
device, ctx, queue = clblas.get_next_compute_context()
N = unsigned(5)
data = Array(Float32, 5)
data[1] = 1.1
data[2] = 2.22
data[3] = 3.333
data[4] = 4.4444
data[5] = 5.55555
bufX = cl.Buffer(Float32, ctx, (:r, :copy),hostbuf=data)
bufAsum = cl.Buffer(Float32, ctx, :w, length(bufX))
scratchBuff = cl.Buffer(Float32, ctx, :rw, length(bufX))
offx = unsigned(0)
incx = cl.cl_int(1)
ncq = cl.cl_uint(1)
clq = queue.id
newl = cl.cl_uint(0)
event = cl.UserEvent(ctx, retain=true)
ptrEvent = [event.id]
clblas.clblasSasum( N, bufAsum.id, 0, bufX.id, 0, incx, scratchBuff.id,
ncq, [clq], newl, C_NULL, ptrEvent);
cl.api.clWaitForEvents(cl.cl_uint(1), ptrEvent)
result = Array(Float32, length(1))
cl.enqueue_read_buffer(queue, bufAsum, result, unsigned(0), nothing, true)
expected = data[1:end] |> sum
println(result)
println("------------")
println(expected)
try
@assert(isapprox(norm(expected - result[1]), zero(Float32)))
info("success!")
finally
clblas.teardown()
end
|
proofpile-julia0005-42728 | {
"provenance": "014.jsonl.gz:242729"
} | # ---
# jupyter:
# jupytext:
# formats: ipynb,jl:hydrogen
# text_representation:
# extension: .jl
# format_name: hydrogen
# format_version: '1.3'
# jupytext_version: 1.10.3
# kernelspec:
# display_name: Julia 1.7.0
# language: julia
# name: julia-1.7
# ---
# %%
struct Foo{A, B}
a::A
b::B
end
# %%
methods(Foo)
# %%
methods(Foo{Int, Int})
# %%
foo = Foo(1.2, 34)
# %%
mult(x::Foo{<:Number, <:Number}) = x.a * x.b
# %%
methods(mult)
# %%
mult(Foo(2, 3 + 4im))
# %%
mult(x::Foo{<:Integer, <:AbstractString}) = x.b ^ x.a
# %%
methods(mult)
# %%
mult(Foo(10, "hoge! "))
# %%
|
proofpile-julia0005-42729 | {
"provenance": "014.jsonl.gz:242730"
} | using Conda
#===============================================================================
NOTE: pyxid is a conda package I built from a PyPI package,
following the directions at https://conda.io/docs/build_tutorials/pkgs.html.
This process required one small modification for the package to work on Windows.
I had to change the file meta.yaml generated by `conda skeleton pypi pyxid`
to contain the following.
preserve_egg_dir: True
===============================================================================#
Conda.add_channel("haberdashPI"); Conda.add("pyxid")
|
proofpile-julia0005-42730 | {
"provenance": "014.jsonl.gz:242731"
} | # This file was generated by the Julia Swagger Code Generator
# Do not modify this file directly. Modify the swagger specification instead.
mutable struct ApplicationGatewayFrontendIPConfigurationPropertiesFormat <: SwaggerModel
privateIPAddress::Any # spec type: Union{ Nothing, String } # spec name: privateIPAddress
privateIPAllocationMethod::Any # spec type: Union{ Nothing, String } # spec name: privateIPAllocationMethod
subnet::Any # spec type: Union{ Nothing, SubResource } # spec name: subnet
publicIPAddress::Any # spec type: Union{ Nothing, SubResource } # spec name: publicIPAddress
provisioningState::Any # spec type: Union{ Nothing, String } # spec name: provisioningState
function ApplicationGatewayFrontendIPConfigurationPropertiesFormat(;privateIPAddress=nothing, privateIPAllocationMethod=nothing, subnet=nothing, publicIPAddress=nothing, provisioningState=nothing)
o = new()
validate_property(ApplicationGatewayFrontendIPConfigurationPropertiesFormat, Symbol("privateIPAddress"), privateIPAddress)
setfield!(o, Symbol("privateIPAddress"), privateIPAddress)
validate_property(ApplicationGatewayFrontendIPConfigurationPropertiesFormat, Symbol("privateIPAllocationMethod"), privateIPAllocationMethod)
setfield!(o, Symbol("privateIPAllocationMethod"), privateIPAllocationMethod)
validate_property(ApplicationGatewayFrontendIPConfigurationPropertiesFormat, Symbol("subnet"), subnet)
setfield!(o, Symbol("subnet"), subnet)
validate_property(ApplicationGatewayFrontendIPConfigurationPropertiesFormat, Symbol("publicIPAddress"), publicIPAddress)
setfield!(o, Symbol("publicIPAddress"), publicIPAddress)
validate_property(ApplicationGatewayFrontendIPConfigurationPropertiesFormat, Symbol("provisioningState"), provisioningState)
setfield!(o, Symbol("provisioningState"), provisioningState)
o
end
end # type ApplicationGatewayFrontendIPConfigurationPropertiesFormat
const _property_map_ApplicationGatewayFrontendIPConfigurationPropertiesFormat = Dict{Symbol,Symbol}(Symbol("privateIPAddress")=>Symbol("privateIPAddress"), Symbol("privateIPAllocationMethod")=>Symbol("privateIPAllocationMethod"), Symbol("subnet")=>Symbol("subnet"), Symbol("publicIPAddress")=>Symbol("publicIPAddress"), Symbol("provisioningState")=>Symbol("provisioningState"))
const _property_types_ApplicationGatewayFrontendIPConfigurationPropertiesFormat = Dict{Symbol,String}(Symbol("privateIPAddress")=>"String", Symbol("privateIPAllocationMethod")=>"String", Symbol("subnet")=>"SubResource", Symbol("publicIPAddress")=>"SubResource", Symbol("provisioningState")=>"String")
Base.propertynames(::Type{ ApplicationGatewayFrontendIPConfigurationPropertiesFormat }) = collect(keys(_property_map_ApplicationGatewayFrontendIPConfigurationPropertiesFormat))
Swagger.property_type(::Type{ ApplicationGatewayFrontendIPConfigurationPropertiesFormat }, name::Symbol) = Union{Nothing,eval(Meta.parse(_property_types_ApplicationGatewayFrontendIPConfigurationPropertiesFormat[name]))}
Swagger.field_name(::Type{ ApplicationGatewayFrontendIPConfigurationPropertiesFormat }, property_name::Symbol) = _property_map_ApplicationGatewayFrontendIPConfigurationPropertiesFormat[property_name]
const _allowed_ApplicationGatewayFrontendIPConfigurationPropertiesFormat_privateIPAllocationMethod = ["Static", "Dynamic"]
function check_required(o::ApplicationGatewayFrontendIPConfigurationPropertiesFormat)
true
end
function validate_property(::Type{ ApplicationGatewayFrontendIPConfigurationPropertiesFormat }, name::Symbol, val)
if name === Symbol("privateIPAllocationMethod")
Swagger.validate_param(name, "ApplicationGatewayFrontendIPConfigurationPropertiesFormat", :enum, val, _allowed_ApplicationGatewayFrontendIPConfigurationPropertiesFormat_privateIPAllocationMethod)
end
end
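# Hypothetical usage sketch (not generated code): construct the model via its keyword
# constructor and check the enum-constrained allocation method; the address value is
# an illustrative assumption.
# cfg = ApplicationGatewayFrontendIPConfigurationPropertiesFormat(
#     privateIPAddress = "10.0.0.4",
#     privateIPAllocationMethod = "Static")   # "Static"/"Dynamic" are the allowed values
# check_required(cfg)                         # returns true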
|
proofpile-julia0005-42731 | {
"provenance": "014.jsonl.gz:242732"
} | using Documenter, Distance
makedocs(sitename="Distance.jl")
|
proofpile-julia0005-42732 | {
"provenance": "014.jsonl.gz:242733"
} | module PowerModelsWildfire
import InfrastructureModels
import InfrastructureModels: nw_id_default
import PowerModels
import PowerModelsRestoration
import JuMP
import MathOptInterface
import Memento
const _MOI = MathOptInterface
const _PM = PowerModels
const _PMR = PowerModelsRestoration
const _IM = InfrastructureModels
include("core/variable.jl")
include("core/constraint_template.jl")
include("core/data.jl")
include("form/dcp.jl")
include("prob/ops.jl")
include("util/risk_heuristic.jl")
include("core/export.jl")
end
|
proofpile-julia0005-42733 | {
"provenance": "014.jsonl.gz:242734"
} | using EditorUtils
using Test
@testset "All Tests" begin
@testset "capitalize strings" begin
@test capitalize("hello") == "Hello"
@test capitalize("HELLO") == "Hello"
@test capitalize("heLLo") == "Hello"
@test capitalize("HELLO_WORLD") == "Hello_world"
@test capitalize.(["hello", "world"]) == ["Hello", "World"]
end
@testset "camel case to snake case" begin
@test snake_case("hello") == "hello"
@test snake_case("HelloWorld") == "hello_world"
@test snake_case("helloWorld") == "hello_world"
end
@testset "snake case to camel case" begin
@test snake_case("hello") == "hello"
@test snake_case("HelloWorld") == "hello_world"
end
end |
proofpile-julia0005-42734 | {
"provenance": "014.jsonl.gz:242735"
} |
export BrickletIndustrialQuadRelayV2Monoflop
struct BrickletIndustrialQuadRelayV2Monoflop
value::Bool
time::Integer
time_remaining::Integer
end
export BrickletIndustrialQuadRelayV2SPITFPErrorCount
struct BrickletIndustrialQuadRelayV2SPITFPErrorCount
error_count_ack_checksum::Integer
error_count_message_checksum::Integer
error_count_frame::Integer
error_count_overflow::Integer
end
export BrickletIndustrialQuadRelayV2Identity
struct BrickletIndustrialQuadRelayV2Identity
uid::String
connected_uid::String
position::Char
hardware_version::Vector{Integer}
firmware_version::Vector{Integer}
device_identifier::Integer
end
export BrickletIndustrialQuadRelayV2
"""
4 galvanically isolated solid state relays
"""
mutable struct BrickletIndustrialQuadRelayV2 <: TinkerforgeDevice
ipcon::IPConnection
deviceInternal::PyObject
"""
Creates an object with the unique device ID *uid* and adds it to
the IP Connection *ipcon*.
"""
function BrickletIndustrialQuadRelayV2(uid::String, ipcon::IPConnection)
package = pyimport("tinkerforge.bricklet_industrial_quad_relay_v2")
deviceInternal = package.BrickletIndustrialQuadRelayV2(uid, ipcon.ipconInternal)
return new(ipcon, deviceInternal)
end
end
export set_value
"""
$(SIGNATURES)
Sets the value of all four relays. A value of *true* closes the
relay and a value of *false* opens the relay.
Use :func:`Set Selected Value` to only change one relay.
All running monoflop timers will be aborted if this function is called.
"""
function set_value(device::BrickletIndustrialQuadRelayV2, value)
device.deviceInternal.set_value(value)
end
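# Hypothetical usage sketch: the host, port and UID below are placeholders, and the
# connection helpers assume the usual Tinkerforge workflow (exact names may differ
# in this wrapper).
# ipcon = IPConnection()
# connect(ipcon, "localhost", 4223)
# iqr = BrickletIndustrialQuadRelayV2("XYZ", ipcon)
# set_value(iqr, [true, false, false, true])   # close channels 0 and 3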
export get_value
"""
$(SIGNATURES)
Returns the values as set by :func:`Set Value`.
"""
function get_value(device::BrickletIndustrialQuadRelayV2)
return device.deviceInternal.get_value()
end
export set_monoflop
"""
$(SIGNATURES)
Configures a monoflop of the specified channel.
The second parameter is the desired value of the specified
channel. A *true* means relay closed and a *false* means relay open.
The third parameter indicates the time that the channels should hold
the value.
If this function is called with the parameters (0, 1, 1500) channel 0 will
close and in 1.5s channel 0 will open again.
A monoflop can be used as a fail-safe mechanism. For example: Let's assume you
have an RS485 bus and an Industrial Quad Relay Bricklet 2.0 connected to one of
the slave stacks. You can now call this function every second, with a time
parameter of two seconds and channel 0 closed. Channel 0 will be closed all the
time. If now the RS485 connection is lost, then channel 0 will be opened in at
most two seconds.
"""
function set_monoflop(device::BrickletIndustrialQuadRelayV2, channel, value, time)
device.deviceInternal.set_monoflop(channel, value, time)
end
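# Hypothetical sketch of the fail-safe pattern described in the docstring above:
# re-arm a 2 s monoflop on channel 0 once per second (assumes `iqr` was created as
# sketched near `set_value`).
# while true
#     set_monoflop(iqr, 0, true, 2000)   # channel 0 closed, auto-opens after 2000 ms
#     sleep(1)
# end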
export get_monoflop
"""
$(SIGNATURES)
Returns (for the given channel) the current value and the time as set by
:func:`Set Monoflop` as well as the remaining time until the value flips.
If the timer is not running currently, the remaining time will be returned
as 0.
"""
function get_monoflop(device::BrickletIndustrialQuadRelayV2, channel)
return device.deviceInternal.get_monoflop(channel)
end
export set_selected_value
"""
$(SIGNATURES)
Sets the output value of the specified channel without affecting the other
channels.
A running monoflop timer for the specified channel will be aborted if this
function is called.
"""
function set_selected_value(device::BrickletIndustrialQuadRelayV2, channel, value)
device.deviceInternal.set_selected_value(channel, value)
end
export set_channel_led_config
"""
$(SIGNATURES)
Each channel has a corresponding LED. You can turn the LED off, on or show a
heartbeat. You can also set the LED to "Channel Status". In this mode the
LED is on if the channel is high and off otherwise.
"""
function set_channel_led_config(device::BrickletIndustrialQuadRelayV2, channel, config)
device.deviceInternal.set_channel_led_config(channel, config)
end
export get_channel_led_config
"""
$(SIGNATURES)
Returns the channel LED configuration as set by :func:`Set Channel LED Config`
"""
function get_channel_led_config(device::BrickletIndustrialQuadRelayV2, channel)
return device.deviceInternal.get_channel_led_config(channel)
end
export get_spitfp_error_count
"""
$(SIGNATURES)
Returns the error count for the communication between Brick and Bricklet.
The errors are divided into
* ACK checksum errors,
* message checksum errors,
* framing errors and
* overflow errors.
The error counts are for errors that occur on the Bricklet side. All
Bricks have a similar function that returns the errors on the Brick side.
"""
function get_spitfp_error_count(device::BrickletIndustrialQuadRelayV2)
return device.deviceInternal.get_spitfp_error_count()
end
export set_bootloader_mode
"""
$(SIGNATURES)
Sets the bootloader mode and returns the status after the requested
mode change was instigated.
You can change from bootloader mode to firmware mode and vice versa. A change
from bootloader mode to firmware mode will only take place if the entry function,
device identifier and CRC are present and correct.
This function is used by Brick Viewer during flashing. It should not be
necessary to call it in a normal user program.
"""
function set_bootloader_mode(device::BrickletIndustrialQuadRelayV2, mode)
return device.deviceInternal.set_bootloader_mode(mode)
end
export get_bootloader_mode
"""
$(SIGNATURES)
Returns the current bootloader mode, see :func:`Set Bootloader Mode`.
"""
function get_bootloader_mode(device::BrickletIndustrialQuadRelayV2)
return device.deviceInternal.get_bootloader_mode()
end
export set_write_firmware_pointer
"""
$(SIGNATURES)
Sets the firmware pointer for :func:`Write Firmware`. The pointer has
to be increased by chunks of size 64. The data is written to flash
every 4 chunks (which equals one page of size 256).
This function is used by Brick Viewer during flashing. It should not be
necessary to call it in a normal user program.
"""
function set_write_firmware_pointer(device::BrickletIndustrialQuadRelayV2, pointer)
device.deviceInternal.set_write_firmware_pointer(pointer)
end
export write_firmware
"""
$(SIGNATURES)
Writes 64 Bytes of firmware at the position as written by
:func:`Set Write Firmware Pointer` before. The firmware is written
to flash every 4 chunks.
You can only write firmware in bootloader mode.
This function is used by Brick Viewer during flashing. It should not be
necessary to call it in a normal user program.
"""
function write_firmware(device::BrickletIndustrialQuadRelayV2, data)
return device.deviceInternal.write_firmware(data)
end
export set_status_led_config
"""
$(SIGNATURES)
Sets the status LED configuration. By default the LED shows
communication traffic between Brick and Bricklet, it flickers once
for every 10 received data packets.
You can also turn the LED permanently on/off or show a heartbeat.
If the Bricklet is in bootloader mode, the LED will show a heartbeat by default.
"""
function set_status_led_config(device::BrickletIndustrialQuadRelayV2, config)
device.deviceInternal.set_status_led_config(config)
end
export get_status_led_config
"""
$(SIGNATURES)
Returns the configuration as set by :func:`Set Status LED Config`
"""
function get_status_led_config(device::BrickletIndustrialQuadRelayV2)
return device.deviceInternal.get_status_led_config()
end
export get_chip_temperature
"""
$(SIGNATURES)
Returns the temperature as measured inside the microcontroller. The
value returned is not the ambient temperature!
The temperature is only proportional to the real temperature and it has bad
accuracy. Practically it is only useful as an indicator for
temperature changes.
"""
function get_chip_temperature(device::BrickletIndustrialQuadRelayV2)
return device.deviceInternal.get_chip_temperature()
end
export reset
"""
$(SIGNATURES)
Calling this function will reset the Bricklet. All configurations
will be lost.
After a reset you have to create new device objects,
calling functions on the existing ones will result in
undefined behavior!
"""
function reset(device::BrickletIndustrialQuadRelayV2)
device.deviceInternal.reset()
end
export write_uid
"""
$(SIGNATURES)
Writes a new UID into flash. If you want to set a new UID
you have to decode the Base58 encoded UID string into an
integer first.
We recommend that you use Brick Viewer to change the UID.
"""
function write_uid(device::BrickletIndustrialQuadRelayV2, uid)
device.deviceInternal.write_uid(uid)
end
export read_uid
"""
$(SIGNATURES)
Returns the current UID as an integer. Encode as
Base58 to get the usual string version.
"""
function read_uid(device::BrickletIndustrialQuadRelayV2)
return device.deviceInternal.read_uid()
end
export get_identity
"""
$(SIGNATURES)
Returns the UID, the UID where the Bricklet is connected to,
the position, the hardware and firmware version as well as the
device identifier.
The position can be 'a', 'b', 'c', 'd', 'e', 'f', 'g' or 'h' (Bricklet Port).
A Bricklet connected to an :ref:`Isolator Bricklet <isolator_bricklet>` is always at
position 'z'.
The device identifier numbers can be found :ref:`here <device_identifier>`.
|device_identifier_constant|
"""
function get_identity(device::BrickletIndustrialQuadRelayV2)
return device.deviceInternal.get_identity()
end
export register_callback
"""
Registers the given *function* with the given *callback_id*.
"""
function register_callback(device::BrickletIndustrialQuadRelayV2, callback_id, function_)
# the callback registry is kept on the wrapped Python device object
if isnothing(function_)
device.deviceInternal.registered_callbacks.pop(callback_id, nothing)
else
device.deviceInternal.registered_callbacks[callback_id] = function_
end
end
|
proofpile-julia0005-42735 | {
"provenance": "014.jsonl.gz:242736"
} | import VoronoiCells
import Deldir
using GeometryBasics
using Plots
# Number of generators: Deldir does not go all the way
Nsmall = [1000; 2000:2000:30_000]
Nbig = 40_000:10_000:100_000
#= Nsmall = [1000; 2000:2000:10_000] =#
#= Nbig = [10_000, 20_000] =#
N = vcat(Nsmall, Nbig)
VCtime = Vector{Float64}(undef, length(N))
Dtime = similar(VCtime)
fill!(Dtime, NaN)
# Compile methods
Deldir.voronoiarea(rand(8), rand(8))
VoronoiCells.voronoicells(rand(8), rand(8), VoronoiCells.Rectangle(Point2(0, 0), Point2(1, 1)))
# Simulation window
W = [0.0 ; 10.0 ; 0.0 ; 10.0]
rect = VoronoiCells.Rectangle(Point2(0, 0), Point2(10, 10))
for (idx, n) in enumerate(N)
println(n)
local x = W[1] .+ W[2]*rand(n)
local y = W[3] .+ W[4]*rand(n)
if n <= Nsmall[end]
Dtime[idx] = @elapsed Deldir.voronoiarea(x, y, W)
end
local points = [Point2(x[k], y[k]) for k in 1:n]
VCtime[idx] = @elapsed begin
tess = VoronoiCells.voronoicells(points, rect)
VoronoiCells.voronoiarea(tess)
end
GC.gc()
end
# ------------------------------------------------------------
x = div.(N, 1000)
maxy = 5 * ceil(maximum(filter(!isnan, Dtime)) / 5)
scatter(x, Dtime, label = "Deldir")
scatter!(x, VCtime, label = "VoronoiCells")
plot!(xlabel = "number of points in 1000s", ylabel = "time in seconds", xticks = 0:20:100, yticks = 0:5:maxy)
plot!(tickfont = font(13), legendfont = font(12))
savefig("comparison.png")
|
proofpile-julia0005-42736 | {
"provenance": "014.jsonl.gz:242737"
} | module Mathy
export @$, @eduction
using Transducers
using Transducers: air
const _atmath_syntax = raw"""
@$ op dot_expr
@$ init op dot_expr
@$ op { dot_expr | filter_expr }
@$ init op { dot_expr | filter_expr }
"""
"""
$_atmath_syntax
Reduce dot call expression `dot_expr` [^dot_syntax] with binary
operator `op` without allocating any intermediate arrays. The result
of the `dot_expr` may be filtered by `filter_expr` using set-builder
like notation `{ dot_expr | filter_expr }`. Expression `dot_expr`
itself may contain the set-builder like notation. Optionally, the
initial value `init` for operator `op` can be specified.
For example, the expression `@\$ + { f.(a) | p(_) }` is equivalent to
the mathematical notation
```math
\\sum \\{ y \\in f.(a) \\,|\\, p(y) \\}
=
\\sum_{x \\in a \\,|\\, p(f(x))} f(x)
```
where predicate `p` is a function that takes the output of `f` and
returns a Boolean. In general `filter_expr` is evaluated by
"substituting" each output element of `dot_expr` to `_` in
`filter_expr`. Likewise, `@\$ * { f.(a) | p(_) }` is equivalent to
``\\prod \\{ y \\in f.(a) \\,|\\, p(y) \\}``.
!!! note
Although set-builder like notation is used, the order of
application of `op` is guaranteed to be left-to-right (`foldl`).
Thus, unlike the mathematical notation, operator `op` does not
have to be commutative or associative.
Under the hood, `@\$ init op { dot_expr | filter_expr }` is converted
to an expression that is conceptually equivalent to
```
mapfoldl(Filter(_ -> filter_expr), op, dot_expr, init=init, simd=true)
```
where `mapfoldl` and `Filter` are implemented in
[Transducers.jl](https://github.com/tkf/Transducers.jl). However, the
output of `dot_expr` is never "materialized"; each output element is
computed on-the-fly.
[^dot_syntax]:
Dot Syntax for Vectorizing Functions:
<https://docs.julialang.org/en/latest/manual/functions/#man-vectorized-1>
# Examples
```jldoctest
julia> using Mathy
julia> @\$ + (1:10) .^ 2
385
julia> ans == sum((1:10) .^ 2)
true
julia> @\$ 0.0 + (1:10) .^ 2
385.0
julia> @\$ 1000 + (1:10) .^ 2
1385
julia> @\$ + { (1:10) .^ 2 | isodd(_) }
165
julia> ans == sum(x for x in (1:10) .^ 2 if isodd(x))
true
julia> @\$ + { (1:10) .^ 2 | (_ + 2) % 3 == 0 }
259
julia> ans == sum(x for x in (1:10) .^ 2 if (x + 2) % 3 == 0)
true
julia> ∑ = +
∏ = *;
julia> @\$ ∑ { (1:10) .^ 2 | (_ + 2) % 3 == 0 }
259
julia> @\$ ∏ { (1:10) .^ 2 | (_ + 2) % 3 == 0 }
501760000
```
Curly braces can be nested (although it is not optimized yet as of
Transducers 0.2.1):
```jldoctest; setup = :(using Mathy)
julia> @\$ + { 1:10 | isodd(_) } .^ 2
165
julia> @\$ 0 + { 2 .* ({ 1:10 | isodd(_) } .^ 2 .+ 1) .- 1 | _ % 3 == 0 }
153
julia> ans == sum(filter(x -> x % 3 == 0, 2 .* (filter(isodd, 1:10) .^ 2 .+ 1) .- 1))
true
```
"""
macro ($)(args...)
esc(atmath_impl(args...))
end
function atmath_impl(expr::Expr)
@assert expr.head == :call
if length(expr.args) == 3
op, init, body = expr.args
return atmath_impl(init, op, body)
elseif length(expr.args) == 2
op, body = expr.args
return atmath_impl(op, body)
else
error("Unsupported expression ", expr, "\n",
"Supported syntax:\n",
_atmath_syntax)
end
end
atmath_impl(op, body) =
atmath_impl(Transducers.MissingInit(), op, body)
function compile_body(body)
if body.head === :braces
@assert length(body.args) == 1
parsed = parse_dot_expr(body.args[1])
if parsed isa Some
dot_expr, filter_expr = something(parsed)
xf_expr = compile_filter(filter_expr)
else
dot_expr = body.args[1]
xf_expr = Map(identity)
end
else
dot_expr = body
xf_expr = Map(identity)
end
return brace_to_eduction(dot_expr), xf_expr
end
function atmath_impl(init, op, body)
dot_expr, xf_expr = compile_body(body)
return quote
$mapfoldl($xf_expr, $op, $air.($dot_expr); init=$init, simd=true)
end
end
parse_dot_expr(x) = x
function parse_dot_expr(expr::Expr)
if expr.head === :call
if expr.args[1] == :|
@assert length(expr.args) == 3
_, l, r = expr.args
return Some((l, r))
elseif length(expr.args) === 3 # binary operator
op, x, y = expr.args
a = parse_dot_expr(x)
if a isa Some
l, r = something(a)
return Some((l, Expr(:call, op, r, y)))
end
b = parse_dot_expr(y)
if b isa Some
l, r = something(b)
return Some((Expr(:call, op, x, l), r))
end
return expr
else
return expr
end
elseif expr.head === :if
a1 = parse_dot_expr(expr.args[1])
if a1 isa Some
l, r = something(a1)
return Some((l, Expr(expr.head, r, expr.args[2:end]...)))
else
return expr
end
else
return expr
end
end
function compile_filter(filter_expr)
var = gensym("x")
subs(x) = x
subs(x::Expr) = Expr(x.head, subs.(x.args)...)
subs(x::Symbol) = x == :_ ? var : x
return :($Filter($var -> $(subs(filter_expr))))
end
"""
isdotcall(x)
# Examples
```jldoctest
julia> using Mathy: isdotcall
julia> isdotcall(:(f.(x)))
true
julia> isdotcall(:(f(x)))
false
julia> isdotcall(:(x .+ y)) # handled separately
false
```
"""
isdotcall(::Any) = false
isdotcall(ex::Expr) =
ex.head === :. && length(ex.args) == 2 && ex.args[2] isa Expr &&
ex.args[2].head === :tuple
"""
isdotbracecall(x)
# Examples
```jldoctest
julia> using Mathy: isdotbracecall
julia> isdotbracecall(:(f.{x}))
true
julia> isdotbracecall(:(f.(x)))
false
julia> isdotbracecall(:(f{x}))
false
```
"""
isdotbracecall(::Any) = false
isdotbracecall(ex::Expr) =
ex.head === :. && length(ex.args) == 2 && ex.args[2] isa Expr &&
ex.args[2].head === :quote && length(ex.args[2].args) == 1 &&
ex.args[2].args[1] isa Expr &&
ex.args[2].args[1].head == :braces && length(ex.args[2].args[1].args) == 1
brace_to_eduction(x) = x
function brace_to_eduction(ex::Expr)
if isdotcall(ex)
args = brace_to_eduction.(ex.args[2].args)
return Expr(:., ex.args[1], Expr(:tuple, args...))
#=
elseif isdotbracecall(ex)
return Expr(:., ex.args[1],
Expr(:tuple, brace_to_eduction(ex.args[2].args[1])))
=#
elseif ex.head === :braces
@assert length(ex.args) == 1
return :($(@__MODULE__).@eduction $ex)
elseif ex.head === :call
return Expr(:call, brace_to_eduction.(ex.args)...)
else
return ex
end
end
"""
@eduction dot_expr
@eduction { dot_expr | filter_expr }
Like [`@\$`](@ref) but without `op`.
# Examples
```jldoctest
julia> using Mathy
julia> collect(@eduction { (1:10) .^ 2 | isodd(_) })
5-element Array{Int64,1}:
1
9
25
49
81
```
"""
macro eduction(ex)
esc(ateduction_impl(ex))
end
function ateduction_impl(ex)
dot_expr, xf_expr = compile_body(ex)
return :($eduction($xf_expr, $air.($dot_expr)))
end
end # module
|
proofpile-julia0005-42737 | {
"provenance": "014.jsonl.gz:242738"
} | # canonical data version: 1.2.1
using Test
include("collatz-conjecture.jl")
# canonical data
@testset "Canonical data" begin
@test collatz_steps(1) == 0
@test collatz_steps(16) == 4
@test collatz_steps(12) == 9
@test collatz_steps(1000000) == 152
@test_throws DomainError collatz_steps(0)
@test_throws DomainError collatz_steps(-15)
end |
proofpile-julia0005-42738 | {
"provenance": "014.jsonl.gz:242739"
} | module ReusableResourceAllocation
using JuMP
using Gurobi
using LinearAlgebra
using MathOptInterface
export reusable_resource_allocation
function reusable_resource_allocation(
initial_supply::Array{<:Real,1},
supply::Array{<:Real,2},
demand::Array{<:Real,2},
adj_matrix::BitArray{2};
obj_dir::Symbol=:shortage,
send_new_only::Bool=false,
sendrecieve_switch_time::Int=0,
min_send_amt::Real=0,
smoothness_penalty::Real=0,
setup_cost::Real=0,
sent_penalty::Real=0,
verbose::Bool=false,
)
N, T = size(supply)
@assert(size(initial_supply, 1) == N)
@assert(size(demand, 1) == N)
@assert(size(demand, 2) == T)
@assert(size(adj_matrix, 1) == N)
@assert(size(adj_matrix, 2) == N)
@assert(obj_dir in [:shortage, :overflow])
model = Model(Gurobi.Optimizer)
if !verbose set_silent(model) end
@variable(model, sent[1:N,1:N,1:T])
@variable(model, obj_dummy[1:N,1:T] >= 0)
if min_send_amt <= 0
@constraint(model, sent .>= 0)
else
@constraint(model, [i=1:N,j=1:N,t=1:T], sent[i,j,t] in MOI.Semicontinuous(Float64(min_send_amt), Inf))
end
objective = @expression(model, sum(obj_dummy))
if sent_penalty > 0
add_to_expression!(objective, sent_penalty*sum(sent))
end
if smoothness_penalty > 0
@variable(model, smoothness_dummy[i=1:N,j=1:N,t=1:T-1] >= 0)
@constraint(model, [t=1:T-1], (sent[:,:,t] - sent[:,:,t+1]) .<= smoothness_dummy[:,:,t])
@constraint(model, [t=1:T-1], -(sent[:,:,t] - sent[:,:,t+1]) .<= smoothness_dummy[:,:,t])
add_to_expression!(objective, smoothness_penalty * sum(smoothness_dummy))
add_to_expression!(objective, smoothness_penalty * sum(sent[:,:,1]))
end
if setup_cost > 0
@variable(model, setup_dummy[i=1:N,j=i+1:N], Bin)
@constraint(model, [i=1:N,j=i+1:N], [1-setup_dummy[i,j], sum(sent[i,j,:])+sum(sent[j,i,:])] in MOI.SOS1([1.0, 1.0]))
add_to_expression!(objective, setup_cost*sum(setup_dummy))
end
@objective(model, Min, objective)
if send_new_only
@constraint(model, [t=1:T],
sum(sent[:,:,t], dims=2) .<= max.(0, supply[:,t])
)
else
@constraint(model, [i=1:N,t=1:T],
sum(sent[i,:,t]) <=
initial_supply[i]
+ sum(supply[i,1:t])
- sum(sent[i,:,1:t-1])
+ sum(sent[:,i,1:t-1])
)
end
for i = 1:N
for j = 1:N
if ~adj_matrix[i,j]
@constraint(model, sum(sent[i,j,:]) .== 0)
end
end
end
if sendrecieve_switch_time > 0
@constraint(model, [i=1:N,t=1:T-1],
[sum(sent[:,i,t]), sum(sent[i,:,t:min(t+sendrecieve_switch_time,T)])] in MOI.SOS1([1.0, 1.0])
)
@constraint(model, [i=1:N,t=1:T-1],
[sum(sent[:,i,t:min(t+sendrecieve_switch_time,T)]), sum(sent[i,:,t])] in MOI.SOS1([1.0, 1.0])
)
end
flip_sign = (obj_dir == :shortage) ? 1 : -1
z1, z2 = (obj_dir == :shortage) ? (0, -1) : (-1, 0)
@constraint(model, [i=1:N,t=1:T],
obj_dummy[i,t] >= flip_sign * (
demand[i,t] - (
initial_supply[i]
+ sum(supply[i,1:t])
- sum(sent[i,:,1:t+z1])
+ sum(sent[:,i,1:t+z2])
)
)
)
optimize!(model)
return model
end
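# Hypothetical usage sketch (names and data are illustrative, a Gurobi license is
# assumed); it is not part of the original module:
#
#     N, T = 3, 5
#     initial_supply = [10.0, 0.0, 5.0]
#     supply = zeros(N, T)
#     demand = ones(N, T)
#     adj = trues(N, N)
#     m = reusable_resource_allocation(initial_supply, supply, demand, adj; obj_dir=:shortage)
#     sent = value.(m[:sent])    # N×N×T array of shipped amounts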
end;
|
proofpile-julia0005-42739 | {
"provenance": "014.jsonl.gz:242740"
} | # The Dual Arrhenius and Michealis-Menten (DAMM) model, Davidson et al. 2012
"""
DAMM(x::VecOrMat{<: Real}, p::NTuple{7, Float64})
Calculate respiration as a function of soil temperature (Tₛ) and moisture (θ).
# Examples
```julia-repl
julia> df = DAMMfdata(100) # generates a fake dataset
100×3 DataFrame
Row │ Tₛ θ Rₛ
│ Float64 Float64 Float64
─────┼─────────────────────────────
1 │ 15.5 0.3 1.72216
2 │ 22.3 0.6 1.8213
⋮ │ ⋮ ⋮ ⋮
99 │ 9.5 0.2 0.223677
100 │ 6.6 0.6 0.730627
julia> fp # parameters: αₛₓ, Eaₛₓ, kMₛₓ, kMₒ₂, porosity, Sxₜₒₜ, Q10Km
(1.0e9, 64.0, 3.46e-8, 0.002, 0.7, 0.02, 1.0)
julia> DAMM(hcat(df.Tₛ, df.θ), fp) # μmolCO₂ m⁻² s⁻¹
100-element Vector{Float64}:
6.023429035220588
0.9298933641647085
⋮
0.8444248717855868
3.805243237387702
```
"""
function DAMM(x::VecOrMat{<: Real}, p::NTuple{7, Float64})
# Independent variables
Tₛ = x[:, 1]°C # Soil temperature
Tₛ = Tₛ .|> K # Tₛ in Kelvin
θ = x[:, 2]m^3*m^-3 # Soil moisture, m³ m⁻³
# Parameters
αₛₓ = p[1]mg*cm^-3*hr^-1 # Pre-exponential factor, mgC cm⁻³ h⁻¹
Eaₛₓ = p[2]kJ*mol^-1 # Activation energy, kJ mol⁻¹
kMₛₓ = p[3]g*cm^-3 # Michaelis constant, gC cm⁻³
kMₒ₂ = p[4]L*L^-1 # Michaelis constant for O₂, L L⁻¹
porosity = p[5] # 1 - soil buld density / soil particle density
Sxₜₒₜ = p[6]g*cm^-3 # Total soil carbon, gC cm⁻³
Q10Km = p[7]
# DAMM model
Tref = 293.15K
Vmax = @. αₛₓ * exp(-Eaₛₓ/(R * Tₛ)) # Maximum potential rate of respiration
Sₓ = @. pₛₓ * Sxₜₒₜ * Dₗᵢ * θ^3 # All soluble substrate, gC cm⁻³
MMₛₓ = @. Sₓ / (kMₛₓ * Q10Km^(ustrip(Tₛ - Tref)/10.0) + Sₓ) # Availability of substrate factor, 0-1
porosityₐᵢᵣ = porosity .- θ # Air filled porosity
porosityₐᵢᵣ[porosityₐᵢᵣ .< 0.0] .= NaN # Moisture cannot be greater than porosity
O₂ = @. Dₒₐ * O₂ₐ * (porosityₐᵢᵣ^(4/3)) # Oxygen concentration
MMₒ₂ = @. O₂ / (kMₒ₂ + O₂) # Oxygen limitation factor, 0-1
Rₛₘ = @. Vmax * MMₛₓ * MMₒ₂ # Respiration, mg C cm⁻³ hr⁻¹
Rₛₘₛ = @. Rₛₘ * Eₛ # Respiration, effective depth 10 cm, mg C cm⁻² hr⁻¹
Rₛₛₘ = Rₛₘₛ .|> molC*m^-2*s^-1
Rₛ = Rₛₛₘ .|> μmolCO₂*m^-2*s^-1
Rₛ = Float64.(ustrip(Rₛ)) # no units
return Rₛ
end
|
proofpile-julia0005-42740 | {
"provenance": "014.jsonl.gz:242741"
} | function input(prompt::AbstractString)
print(prompt)
return readline()
end
n = input("Upper bound for dimension 1: ") |>
x -> parse(Int, x)
m = input("Upper bound for dimension 2: ") |>
x -> parse(Int, x)
x = rand(n, m)
display(x)
x[3, 3] # overloads `getindex` generic function
x[3, 3] = 5.0 # overloads `setindex!` generic function
x::Matrix # `Matrix{T}` is an alias for `Array{T, 2}`
x = 0; GC.gc() # Julia has no `del` command, rebind `x` and call the garbage collector
|
proofpile-julia0005-42741 | {
"provenance": "014.jsonl.gz:242742"
} | module TestXGBoost
using MLJ
using Test
using Pkg
Pkg.clone("https://github.com/dmlc/XGBoost.jl")
import XGBoost
function readlibsvm(fname::String, shape)
dmx = zeros(Float32, shape)
label = Float32[]
fi = open(fname, "r")
cnt = 1
for line in eachline(fi)
line = split(line, " ")
push!(label, parse(Float64, line[1]))
line = line[2:end]
for itm in line
itm = split(itm, ":")
dmx[cnt, parse(Int, itm[1]) + 1] = float(parse(Int, itm[2]))
end
cnt += 1
end
close(fi)
return (dmx, label)
end
train_X, train_Y = readlibsvm("../data/agaricus.txt.train", (6513, 126))
test_X, test_Y = readlibsvm("../data/agaricus.txt.test", (1611, 126))
dtrain = XGBoost.DMatrix("../data/agaricus.txt.train")
dtest = XGBoost.DMatrix("../data/agaricus.txt.test")
#import XGBoost_
bareboost = XGBoostRegressor(num_round=6)
bfit,= MLJ.fit(bareboost,1,train_X,train_Y)
num_round=6
bst = XGBoost.xgboost(train_X, num_round, label = train_Y)
bst_pred = XGBoost.predict(bst,test_X)
bst_DMatrix, = MLJ.fit(bareboost,1,dtrain)
bpredict_fulltree = MLJ.predict(bareboost,bfit,test_X)
bpredict_onetree = MLJ.predict(bareboost,bfit,test_X,ntree_limit=1)
bpredict_DMatrix = MLJ.predict(bareboost,bst_DMatrix,dtest)
@test bpredict_fulltree !=bpredict_onetree
@test bpredict_fulltree ≈ bpredict_DMatrix atol=0.000001
@test_logs (:warn,"updater has been changed to shotgun, the default option for booster=\"gblinear\"") XGBoostRegressor(num_round=1,booster="gblinear",updater="grow_colmaker")
@test_logs (:warn,"\n num_class has been changed to 2") XGBoostClassifier(eval_metric="mlogloss",objective="multi:softprob")
@test bst_pred ≈ bpredict_fulltree atol=0.000001
end
true
|
proofpile-julia0005-42742 | {
"provenance": "014.jsonl.gz:242743"
} | # ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------
"""
CubicVariogram(range=r, sill=s, nugget=n)
CubicVariogram(ball; sill=s, nugget=n)
A cubic variogram with range `r`, sill `s` and nugget `n`.
Optionally, use a custom metric `ball`.
"""
struct CubicVariogram{V,B} <: Variogram
sill::V
nugget::V
ball::B
end
CubicVariogram(ball; sill=1.0, nugget=zero(typeof(sill))) =
CubicVariogram(sill, nugget, ball)
CubicVariogram(; range=1.0, sill=1.0, nugget=zero(typeof(sill))) =
CubicVariogram(sill, nugget, MetricBall(range))
function (γ::CubicVariogram)(h::T) where {T}
r = radius(γ.ball)
s = γ.sill
n = γ.nugget
# constants
c1 = T(35) / T(4)
c2 = T(7) / T(2)
c3 = T(3) / T(4)
s1 = 7*(h/r)^2 - c1*(h/r)^3 + c2*(h/r)^5 - c3*(h/r)^7
s2 = T(1)
(h < r) * (s - n) * s1 +
(h ≥ r) * (s - n) * s2 +
(h > 0) * n
end
isstationary(::Type{<:CubicVariogram}) = true
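# For instance (a sketch, assuming the default metric ball constructed from `range`):
#
#     γ = CubicVariogram(range=2.0, sill=1.0, nugget=0.0)
#     γ(0.0) == 0.0; γ(1.0) ≈ 0.76; γ(h) == 1.0 for any h ≥ 2.0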
|
proofpile-julia0005-42743 | {
"provenance": "014.jsonl.gz:242744"
} | #
# Parallel thread splitter
#
splitter(first,nbatches,n) = first:nbatches:n
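# Example: splitter(2, 4, 11) == 2:4:11, i.e. batch 2 of 4 processes indices 2, 6 and 10.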
"""
```
reduce(output, output_threaded)
```
Functions to reduce the output for the common cases (numbers, and
vectors of numbers or of vectors). This function can be replaced by a custom
reduction method. It must always receive the `output` variable
as a parameter and return it at the end.
"""
reduce(output::Number, output_threaded::Vector{<:Number}) = sum(output_threaded)
function reduce(output::AbstractVector, output_threaded::AbstractVector{<:AbstractVector})
@. output = output_threaded[1]
for ibatch in 2:length(output_threaded)
@. output += output_threaded[ibatch]
end
return output
end
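# Sketch of a custom reduction (an illustration, not part of this file): if the output is
# a tuple of an energy and a force vector, the two default methods above do not apply and
# a user-provided `reduce` has to combine the threaded copies explicitly, e.g.
#
#     function reduce_energy_forces(output, output_threaded)
#         energy = sum(t[1] for t in output_threaded)
#         forces = output[2]
#         forces .= output_threaded[1][2]
#         for ibatch in 2:length(output_threaded)
#             forces .+= output_threaded[ibatch][2]
#         end
#         return (energy, forces)
#     end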
#
# Serial version for self-pairwise computations
#
function map_pairwise_serial!(
f::F, output, box::Box, cl::CellList{N,T};
show_progress::Bool=false
) where {F,N,T}
@unpack n_cells_with_real_particles = cl
show_progress && (p = Progress(n_cells_with_real_particles, dt=1))
ibatch = 1
for i in 1:n_cells_with_real_particles
cellᵢ = cl.cells[cl.cell_indices_real[i]]
output = inner_loop!(f, box, cellᵢ, cl, output, ibatch)
show_progress && next!(p)
end
return output
end
#
# Parallel version for self-pairwise computations
#
function map_pairwise_parallel!(
f::F1, output, box::Box, cl::CellList{N,T};
output_threaded=output_threaded,
reduce::F2=reduce,
show_progress::Bool=false
) where {F1,F2,N,T}
@unpack n_cells_with_real_particles = cl
show_progress && (p = Progress(n_cells_with_real_particles, dt=1))
nbatches = cl.nbatches.map_computation
@sync for ibatch in 1:nbatches
Threads.@spawn begin
for i in splitter(ibatch, nbatches, n_cells_with_real_particles)
cellᵢ = cl.cells[cl.cell_indices_real[i]]
output_threaded[ibatch] =
inner_loop!(f, box, cellᵢ, cl, output_threaded[ibatch], ibatch)
show_progress && next!(p)
end
end
end
output = reduce(output, output_threaded)
return output
end
"""
```
partition!(by, x::AbstractVector)
```
Internal function or structure - interface may change.
# Extended help
Function that reorders `x` vector by putting in the first positions the
elements with values satisfying `by(el)`. Returns the number of elements
that satisfy the condition.
"""
function partition!(by, x::AbstractVector)
iswap = 1
@inbounds for i in eachindex(x)
if by(x[i])
if iswap != i
x[iswap], x[i] = x[i], x[iswap]
end
iswap += 1
end
end
return iswap - 1
end
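# Example: x = [1, 2, 3, 4, 5, 6]; partition!(iseven, x) returns 3 and
# reorders x to [2, 4, 6, 1, 5, 3], with the elements satisfying the
# predicate moved to the first positions.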
#
# Serial version for cross-interaction computations
#
function map_pairwise_serial!(
f::F, output, box::Box,
cl::CellListPair{N,T};
show_progress=show_progress
) where {F,N,T}
show_progress && (p = Progress(length(cl.ref), dt=0))
for i in eachindex(cl.ref)
output = inner_loop!(f, output, i, box, cl)
show_progress && next!(p)
end
return output
end
#
# Parallel version for cross-interaction computations
#
function map_pairwise_parallel!(
f::F1, output, box::Box,
cl::CellListPair{N,T};
output_threaded=output_threaded,
reduce::F2=reduce,
show_progress=show_progress
) where {F1,F2,N,T}
show_progress && (p = Progress(length(cl.ref), dt=1))
nbatches = cl.target.nbatches.map_computation
@sync for ibatch in 1:nbatches
Threads.@spawn begin
for i in splitter(ibatch, nbatches, length(cl.ref))
output_threaded[ibatch] = inner_loop!(f, output_threaded[ibatch], i, box, cl)
show_progress && next!(p)
end
end
end
output = reduce(output, output_threaded)
return output
end
#
# Inner loop for Triclinic cells. Here the half-cell shortcut of the Orthorhombic case
# cannot be used, so all neighbor cells are visited and repeated pairs are avoided by
# comparing particle indices.
#
function inner_loop!(
f,box::Box{TriclinicCell},cellᵢ,
cl::CellList{N,T},
output,
ibatch
) where {N,T}
@unpack cutoff, cutoff_sq, nc = box
for neighbor_cell in neighbor_cells(box)
jc_cartesian = cellᵢ.cartesian_index + neighbor_cell
jc_linear = cell_linear_index(nc,jc_cartesian)
# if cellⱼ is empty, cycle
if cl.cell_indices[jc_linear] == 0
continue
end
cellⱼ = cl.cells[cl.cell_indices[jc_linear]]
# same cell
if cellⱼ.linear_index == cellᵢ.linear_index
for i in 1:cellᵢ.n_particles
@inbounds pᵢ = cellᵢ.particles[i]
(!pᵢ.real) && continue
xpᵢ = pᵢ.coordinates
for j in 1:cellᵢ.n_particles
@inbounds pⱼ = cellᵢ.particles[j]
if pᵢ.index < pⱼ.index
xpⱼ = pⱼ.coordinates
d2 = norm_sqr(xpᵢ - xpⱼ)
if d2 <= cutoff_sq
output = f(xpᵢ, xpⱼ, pᵢ.index, pⱼ.index, d2, output)
end
end
end
end
# neighbor cells
else
# Vector connecting cell centers
Δc = cellⱼ.center - cellᵢ.center
Δc_norm = norm(Δc)
Δc = Δc / Δc_norm
pp = project_particles!(cl.projected_particles[ibatch],cellⱼ,cellᵢ,Δc,Δc_norm,box)
for i in 1:cellᵢ.n_particles
@inbounds pᵢ = cellᵢ.particles[i]
(!pᵢ.real) && continue
xpᵢ = pᵢ.coordinates
# Partition pp array according to the current projections. This
# is faster than it looks, because after the first partitioning the
# array is already almost in the right order, so later partitionings
# only need to move a few elements. Avoiding this partitioning, or
# sorting the array beforehand, always turned out to be much more
# expensive.
xproj = dot(xpᵢ - cellᵢ.center, Δc)
n = partition!(el -> abs(el.xproj - xproj) <= cutoff, pp)
for j in 1:n
@inbounds pⱼ = pp[j]
if pᵢ.index < pⱼ.index
xpⱼ = pⱼ.coordinates
d2 = norm_sqr(xpᵢ - xpⱼ)
if d2 <= cutoff_sq
output = f(xpᵢ, xpⱼ, pᵢ.index, pⱼ.index, d2, output)
end
end
end
end
end
end
return output
end
#
# Inner loop for Orthorhombic cells is faster because we can guarantee that
# there are not repeated computations even if running over half of the cells.
#
function inner_loop!(
f,box::Box{OrthorhombicCell},cellᵢ,
cl::CellList{N,T},
output,
ibatch
) where {N,T}
@unpack cutoff_sq = box
# loop over list of non-repeated particles of cell ic
for i in 1:cellᵢ.n_particles - 1
@inbounds pᵢ = cellᵢ.particles[i]
xpᵢ = pᵢ.coordinates
for j in i + 1:cellᵢ.n_particles
@inbounds pⱼ = cellᵢ.particles[j]
xpⱼ = pⱼ.coordinates
d2 = norm_sqr(xpᵢ - xpⱼ)
if d2 <= cutoff_sq
output = f(xpᵢ, xpⱼ, pᵢ.index, pⱼ.index, d2, output)
end
end
end
for jcell in neighbor_cells_forward(box)
jc_linear = cell_linear_index(box.nc,cellᵢ.cartesian_index + jcell)
if cl.cell_indices[jc_linear] != 0
cellⱼ = cl.cells[cl.cell_indices[jc_linear]]
output = cell_output!(f, box, cellᵢ, cellⱼ, cl, output, ibatch)
end
end
return output
end
function cell_output!(
f,
box::Box{OrthorhombicCell},
cellᵢ,
cellⱼ,
cl::CellList{N,T},
output,
ibatch
) where {N,T}
@unpack cutoff, cutoff_sq, nc = box
# Vector connecting cell centers
Δc = cellⱼ.center - cellᵢ.center
Δc_norm = norm(Δc)
Δc = Δc / Δc_norm
pp = project_particles!(cl.projected_particles[ibatch],cellⱼ,cellᵢ,Δc,Δc_norm,box)
if length(pp) == 0
return output
end
# Loop over particles of cell icell
for i in 1:cellᵢ.n_particles
@inbounds pᵢ = cellᵢ.particles[i]
xpᵢ = pᵢ.coordinates
xproj = dot(xpᵢ - cellᵢ.center, Δc)
# Partition pp array according to the current projections
n = partition!(el -> abs(el.xproj - xproj) <= cutoff, pp)
# Compute the interactions
for j in 1:n
@inbounds pⱼ = pp[j]
xpⱼ = pⱼ.coordinates
d2 = norm_sqr(xpᵢ - xpⱼ)
if d2 <= cutoff_sq
output = f(xpᵢ, xpⱼ, pᵢ.index, pⱼ.index, d2, output)
end
end
end
return output
end
"""
```
project_particles!(projected_particles,cellⱼ,cellᵢ,Δc,Δc_norm,box)
```
Internal function or structure - interface may change.
# Extended help
Projects all particles of the cell `cellⱼ` onto the unit vector `Δc` along the direction
connecting the centers of `cellⱼ` and `cellᵢ`. Modifies `projected_particles`, and
returns a view of `projected_particles` containing only the particles whose
projection onto the direction of the cell centers still allows them
to be within the cutoff distance of some point of the other cell.
"""
function project_particles!(
projected_particles,cellⱼ,cellᵢ,
Δc,Δc_norm,box::Box{UnitCellType,N}
) where {UnitCellType,N}
if box.lcell == 1
margin = box.cutoff + Δc_norm/2 # half of the distance between centers
else
margin = box.cutoff*(1 + sqrt(N)/2) # half of the diagonal of the cutoff-box
end
iproj = 0
@inbounds for j in 1:cellⱼ.n_particles
pⱼ = cellⱼ.particles[j]
xproj = dot(pⱼ.coordinates - cellᵢ.center, Δc)
if abs(xproj) <= margin
iproj += 1
projected_particles[iproj] = ProjectedParticle(pⱼ.index, xproj, pⱼ.coordinates)
end
end
pp = @view(projected_particles[1:iproj])
return pp
end
#
# Inner loop of cross-interaction computations. If the two sets
# are large, this might(?) be improved by computing two separate
# cell lists and using projection and partitioning.
#
function inner_loop!(
f,output,i,box,
cl::CellListPair{N,T}
) where {N,T}
@unpack nc, cutoff_sq = box
xpᵢ = wrap_to_first(cl.ref[i], box)
ic = particle_cell(xpᵢ, box)
for neighbor_cell in neighbor_cells(box)
jc_cartesian = neighbor_cell + ic
jc_linear = cell_linear_index(nc,jc_cartesian)
# If cellⱼ is empty, cycle
if cl.target.cell_indices[jc_linear] == 0
continue
end
cellⱼ = cl.target.cells[cl.target.cell_indices[jc_linear]]
# loop over particles of cellⱼ
for j in 1:cellⱼ.n_particles
@inbounds pⱼ = cellⱼ.particles[j]
xpⱼ = pⱼ.coordinates
d2 = norm_sqr(xpᵢ - xpⱼ)
if d2 <= cutoff_sq
output = f(xpᵢ, xpⱼ, i, pⱼ.index, d2, output)
end
end
end
return output
end
|
proofpile-julia0005-42744 | {
"provenance": "014.jsonl.gz:242745"
} | using Observers
using Plots
export AQObserver
struct AQObserver <: Observer
snode::Int
actions::Vector{Any}
ns::Vector{Any}
Xs::Vector{Any}
ys::Vector{Any}
end
AQObserver(snode::Int) = AQObserver(snode, [], [], [], [])
function Observers.notify_observer!(o::AQObserver, b::ModularBandit; kwargs...)
snode = kwargs_get(kwargs, :snode)
o.snode == snode || return #only monitor snode
p = kwargs_get(kwargs, :planner)
tree = get(p.tree)
sanode = kwargs_get(kwargs, :sanode)
push!(o.actions, tree.a_labels[sanode])
children = tree.children[snode]
actions = [tree.a_labels[c] for c in children]
qs = [tree.q[c] for c in children]
ns = [tree.n[c] for c in children]
push!(o.Xs, actions)
push!(o.ys, qs)
push!(o.ns, ns)
end
@recipe function plot(o::AQObserver, i, ylim=nothing)
@series begin
seriestype := :scatter
xlabel := "action"
ylabel := "q"
title := "n=$i"
if ylim != nothing
ylim := ylim
end
x = o.Xs[i]
y = o.ys[i]
x, y
end
end
Base.length(o::AQObserver) = length(o.Xs)
function Plots.animate(o::AQObserver, fname="./aqobserver.gif"; fps=2, ylim=nothing)
anim = @animate for i=1:length(o)
Plots.plot(o, i, ylim=ylim)
end
gif(anim, fname; fps=fps)
end
|
proofpile-julia0005-42745 | {
"provenance": "014.jsonl.gz:242746"
} | const GRPC_STATIC_HEADERS = Ref{Ptr{Nothing}}(C_NULL)
const StatusCode = (
OK = (code=0, message="Success"),
CANCELLED = (code=1, message="The operation was cancelled"),
UNKNOWN = (code=2, message="Unknown error"),
INVALID_ARGUMENT = (code=3, message="Client specified an invalid argument"),
DEADLINE_EXCEEDED = (code=4, message="Deadline expired before the operation could complete"),
NOT_FOUND = (code=5, message="Requested entity was not found"),
ALREADY_EXISTS = (code=6, message="Entity already exists"),
PERMISSION_DENIED = (code=7, message="No permission to execute the specified operation"),
RESOURCE_EXHAUSTED = (code=8, message="Resource exhausted"),
FAILED_PRECONDITION = (code=9, message="Operation was rejected because the system is not in a state required for the operation's execution"),
ABORTED = (code=10, message="Operation was aborted"),
OUT_OF_RANGE = (code=11, message="Operation was attempted past the valid range"),
UNIMPLEMENTED = (code=12, message="Operation is not implemented or is not supported/enabled in this service"),
INTERNAL = (code=13, message="Internal error"),
UNAVAILABLE = (code=14, message="The service is currently unavailable"),
DATA_LOSS = (code=15, message="Unrecoverable data loss or corruption"),
UNAUTHENTICATED = (code=16, message="The request does not have valid authentication credentials for the operation")
)
grpc_status_info(code) = StatusCode[code+1]
grpc_status_message(code) = (grpc_status_info(code)).message
grpc_status_code_str(code) = string(propertynames(StatusCode)[code+1])
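# For example, grpc_status_message(5) == "Requested entity was not found" and
# grpc_status_code_str(5) == "NOT_FOUND".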
#=
const SEND_BUFFER_SZ = 1024 * 1024
function buffer_send_data(input::Channel{T}) where T <: ProtoType
data = nothing
if isready(input)
iob = IOBuffer()
while isready(input) && (iob.size < SEND_BUFFER_SZ)
write(iob, to_delimited_message_bytes(take!(input)))
yield()
end
data = take!(iob)
elseif isopen(input)
data = UInt8[]
end
data
end
=#
function send_data(easy::Curl.Easy, input::Channel{T}, max_send_message_length::Int) where T <: ProtoType
while true
yield()
data = isready(input) ? to_delimited_message_bytes(take!(input), max_send_message_length) : isopen(input) ? UInt8[] : nothing
easy.input === nothing && break
easy.input = data
Curl.curl_easy_pause(easy.handle, Curl.CURLPAUSE_CONT)
wait(easy.ready)
easy.input === nothing && break
easy.ready = Threads.Event()
end
end
function grpc_timeout_header_val(timeout::Real)
if round(Int, timeout) == timeout
timeout_secs = round(Int64, timeout)
return "$(timeout_secs)S"
end
timeout *= 1000
if round(Int, timeout) == timeout
timeout_millisecs = round(Int64, timeout)
return "$(timeout_millisecs)m"
end
timeout *= 1000
if round(Int, timeout) == timeout
timeout_microsecs = round(Int64, timeout)
return "$(timeout_microsecs)u"
end
timeout *= 1000
timeout_nanosecs = round(Int64, timeout)
return "$(timeout_nanosecs)n"
end
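# Examples: grpc_timeout_header_val(30) == "30S", grpc_timeout_header_val(0.5) == "500m",
# and grpc_timeout_header_val(2.5e-4) == "250u" (value plus gRPC timeout unit suffix).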
function grpc_headers(; timeout::Real=Inf)
headers = C_NULL
headers = LibCURL.curl_slist_append(headers, "User-Agent: $(Curl.USER_AGENT)")
headers = LibCURL.curl_slist_append(headers, "Content-Type: application/grpc+proto")
headers = LibCURL.curl_slist_append(headers, "Content-Length:")
headers = LibCURL.curl_slist_append(headers, "te: trailers")
if timeout !== Inf
headers = LibCURL.curl_slist_append(headers, "grpc-timeout: $(grpc_timeout_header_val(timeout))")
end
headers
end
function grpc_request_header(request_timeout::Real)
if request_timeout == Inf
GRPC_STATIC_HEADERS[]
else
grpc_headers(; timeout=request_timeout)
end
end
function easy_handle(maxage::Clong, keepalive::Clong, negotiation::Symbol, revocation::Bool, request_timeout::Real)
easy = Curl.Easy()
http_version = (negotiation === :http2) ? CURL_HTTP_VERSION_2_0 :
(negotiation === :http2_tls) ? CURL_HTTP_VERSION_2TLS :
(negotiation === :http2_prior_knowledge) ? CURL_HTTP_VERSION_2_PRIOR_KNOWLEDGE :
throw(ArgumentError("unsupported HTTP2 negotiation mode $negotiation"))
Curl.setopt(easy, CURLOPT_HTTP_VERSION, http_version)
Curl.setopt(easy, CURLOPT_PIPEWAIT, Clong(1))
Curl.setopt(easy, CURLOPT_POST, Clong(1))
Curl.setopt(easy, CURLOPT_HTTPHEADER, grpc_request_header(request_timeout))
if !revocation
Curl.setopt(easy, CURLOPT_SSL_OPTIONS, CURLSSLOPT_NO_REVOKE)
end
if maxage > 0
Curl.setopt(easy, CURLOPT_MAXAGE_CONN, maxage)
end
if keepalive > 0
Curl.setopt(easy, CURLOPT_TCP_KEEPALIVE, Clong(1))
Curl.setopt(easy, CURLOPT_TCP_KEEPINTVL, keepalive);
Curl.setopt(easy, CURLOPT_TCP_KEEPIDLE, keepalive);
end
easy
end
function recv_data(easy::Curl.Easy, output::Channel{T}, max_recv_message_length::Int) where T <: ProtoType
iob = PipeBuffer()
waiting_for_header = true
msgsize = 0
compressed = UInt8(0)
datalen = UInt32(0)
need_more = true
for buf in easy.output
write(iob, buf)
need_more = false
while !need_more
if waiting_for_header
if bytesavailable(iob) >= 5
compressed = read(iob, UInt8) # compression
datalen = ntoh(read(iob, UInt32)) # message length
if datalen > max_recv_message_length
throw(gRPCMessageTooLargeException(max_recv_message_length, datalen))
end
waiting_for_header = false
else
need_more = true
end
end
if !waiting_for_header
if bytesavailable(iob) >= datalen
msgbytes = IOBuffer(view(iob.data, iob.ptr:(iob.ptr+datalen-1)))
put!(output, readproto(msgbytes, T())) # decode message bytes
iob.ptr += datalen
waiting_for_header = true
else
need_more = true
end
end
end
end
close(output)
end
function set_connect_timeout(easy::Curl.Easy, timeout::Real)
timeout >= 0 ||
throw(ArgumentError("timeout must be non-negative, got $timeout"))
if timeout ≤ typemax(Clong) ÷ 1000
timeout_ms = round(Clong, timeout * 1000)
Curl.setopt(easy, CURLOPT_CONNECTTIMEOUT_MS, timeout_ms)
else
timeout = timeout ≤ typemax(Clong) ? round(Clong, timeout) : Clong(0)
Curl.setopt(easy, CURLOPT_CONNECTTIMEOUT, timeout)
end
end
function grpc_request(downloader::Downloader, url::String, input::Channel{T1}, output::Channel{T2};
maxage::Clong = typemax(Clong),
keepalive::Clong = 60,
negotiation::Symbol = :http2_prior_knowledge,
revocation::Bool = true,
request_timeout::Real = Inf,
connect_timeout::Real = 0,
max_recv_message_length::Int = DEFAULT_MAX_RECV_MESSAGE_LENGTH,
max_send_message_length::Int = DEFAULT_MAX_SEND_MESSAGE_LENGTH,
verbose::Bool = false)::gRPCStatus where {T1 <: ProtoType, T2 <: ProtoType}
Curl.with_handle(easy_handle(maxage, keepalive, negotiation, revocation, request_timeout)) do easy
# setup the request
Curl.set_url(easy, url)
Curl.set_timeout(easy, request_timeout)
set_connect_timeout(easy, connect_timeout)
Curl.set_verbose(easy, verbose)
Curl.add_upload_callbacks(easy)
Downloads.set_ca_roots(downloader, easy)
# do the request
Curl.add_handle(downloader.multi, easy)
function cleanup()
Curl.remove_handle(downloader.multi, easy)
# though remove_handle sets easy.handle to C_NULL, it does not close output and progress channels
# we need to close them here to unblock anything waiting on them
close(easy.output)
close(easy.progress)
close(output)
close(input)
nothing
end
# do send recv data
if VERSION < v"1.5"
cleaned_up = false
exception = nothing
cleanup_once = (ex)->begin
if !cleaned_up
cleaned_up = true
exception = ex
cleanup()
end
end
@sync begin
@async try
recv_data(easy, output, max_recv_message_length)
catch ex
cleanup_once(ex)
end
@async try
send_data(easy, input, max_send_message_length)
catch ex
cleanup_once(ex)
end
end
if exception !== nothing
throw(exception)
end
else
try
Base.Experimental.@sync begin
@async recv_data(easy, output, max_recv_message_length)
@async send_data(easy, input, max_send_message_length)
end
finally # ensure handle is removed
cleanup()
end
end
@debug("response headers", easy.res_hdrs)
# parse the grpc headers
grpc_status = StatusCode.OK.code
grpc_message = ""
for hdr in easy.res_hdrs
if startswith(hdr, "grpc-status")
grpc_status = parse(Int, strip(last(split(hdr, ':'; limit=2))))
elseif startswith(hdr, "grpc-message")
grpc_message = string(strip(last(split(hdr, ':'; limit=2))))
end
end
if (easy.code == CURLE_OPERATION_TIMEDOUT) && (grpc_status == StatusCode.OK.code)
grpc_status = StatusCode.DEADLINE_EXCEEDED.code
end
if (grpc_status != StatusCode.OK.code) && isempty(grpc_message)
grpc_message = grpc_status_message(grpc_status)
end
if ((easy.code == CURLE_OK) && (grpc_status == StatusCode.OK.code))
gRPCStatus(true, grpc_status, "")
else
gRPCStatus(false, grpc_status, isempty(grpc_message) ? Curl.get_curl_errstr(easy) : grpc_message)
end
end
end
|
proofpile-julia0005-42746 | {
"provenance": "014.jsonl.gz:242747"
} | module SparsityDetection
using SpecialFunctions
using Cassette, LinearAlgebra, SparseArrays
using Cassette: tag, untag, Tagged, metadata, hasmetadata, istagged, canrecurse
using Cassette: tagged_new_tuple, ContextTagged, BindingMeta, DisableHooks, nametype
using Core: SSAValue
export Sparsity, jacobian_sparsity, hessian_sparsity, hsparsity, sparsity!
include("util.jl")
include("controlflow.jl")
include("propagate_tags.jl")
include("linearity.jl")
include("jacobian.jl")
include("hessian.jl")
include("blas.jl")
sparsity!(args...; kwargs...) = jacobian_sparsity(args...; kwargs...)
hsparsity(args...; kwargs...) = hessian_sparsity(args...; kwargs...)
end
|
proofpile-julia0005-42747 | {
"provenance": "014.jsonl.gz:242748"
} | export find_rotation_sets
struct rotation_element{F}
axis::Vector{F}
order::Int
end
function Base.:(==)(A::rotation_element, B::rotation_element)
issame_axis(A.axis, B.axis) &&
A.order == B.order
end
function issame_axis(a, b)
"""
Checks if two axes are equivalent
|a⋅b| = 1 assuming ||a|| = ||b|| = 1
"""
A = normalize(a)
B = normalize(b)
d = abs(A ⋅ B)
return isapprox(d, 1.0, atol=tol)
end
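# Example (using the module-level tolerance `tol`): issame_axis([0.0, 0.0, 1.0], [0.0, 0.0, -2.0])
# is true, since anti-parallel axes are equivalent, while
# issame_axis([1.0, 0.0, 0.0], [0.0, 1.0, 0.0]) is false.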
function rotation_set_intersection(rotation_set)
"""
Finds the intersection of the elements in the sets
within 'rotation_set'
"""
out = deepcopy(rotation_set[1])
len = size(rotation_set)[1]
if len > 2
for i = 1:size(rotation_set)[1]
intersect!(out, rotation_set[i])
end
end
return out
end
function find_rotation_sets(mol::Molecule, SEAs)
"""
Loop through SEAs to label each set, and find principal axis and potential rotation orders
as outlined in the flowchart in DOI: 10.1002/jcc.23493
Returns a list of lists with each SEA's potential rotations
"""
out_all_SEAs = []
for sea in SEAs
out_per_SEA = []
len = size(sea.set)[1]
if len < 2
sea.label = "Single Atom"
elseif len == 2
sea.label = "Linear"
sea.paxis = mol[sea.set[1]].xyz - mol[sea.set[2]].xyz
else
moit = eigenmoit(calcmoit([mol[i] for i in sea.set]))
Ia, Ib, Ic = moit[1]
Iav, Ibv, Icv = [moit[2][:,idx] for idx = 1:3]
if isapprox(Ia, Ib, atol=tol) && isapprox(Ia, Ic, atol=tol)
sea.label = "Spherical"
elseif isapprox(Ia+Ib, Ic, atol=tol)
paxis = Icv
if isapprox(Ia, Ib, atol=tol)
sea.label = "Regular Polygon"
sea.paxis = paxis
for i = 2:len
if isfactor(len, i)
re = rotation_element(paxis, i)
push!(out_per_SEA, re)
end
end
else
sea.label = "Irregular Polygon"
sea.paxis = paxis
for i = 2:len-1
if isfactor(len, i)
re = rotation_element(paxis, i)
push!(out_per_SEA, re)
end
end
end
else
if !(isapprox(Ia, Ib, atol=tol) || isapprox(Ib, Ic, atol = tol))
sea.label = "Asymmetric Rotor"
for i in [Iav, Ibv, Icv]
re = rotation_element(i, 2)
push!(out_per_SEA, re)
end
else
if Ia == Ib
paxis = Icv
sea.label = "Oblate Symmetric Top"
sea.paxis = paxis
else
paxis = Iav
sea.label = "Prolate Symmetric Top"
sea.paxis = paxis
end
k = div(len, 2)
for i = 2:k
if isfactor(k, i)
re = rotation_element(paxis, i)
push!(out_per_SEA, re)
end
end
end
end
push!(out_all_SEAs, out_per_SEA)
end
end
return out_all_SEAs
end
function isfactor(n, a)
"""
Simple function, is a a factor of n? Return true/false
"""
if n % a == 0
return true
else
return false
end
end
function find_rotations(mol::Molecule, rotation_set)
"""
Find all actual rotation elements for the Molecule
based off of potential rotation elements from 'find_rotation_sets'
"""
molmoit = eigenmoit(calcmoit(mol))
if molmoit[1][1] == 0.0 && molmoit[1][2] == molmoit[1][3]
# This block executes if the Molecule is linear
paxis = normalize(mol[1].xyz)
re = rotation_element(paxis, 0)
return [re]
end
rsi = rotation_set_intersection(rotation_set)
out = []
for i in rsi
rmat = Molecules.Cn(i.axis, i.order)
molB = Molecules.transform(mol, rmat)
c = Molecules.isequivalent(molB, mol)
if c
push!(out, i)
end
end
return out
end
function find_c2(mol::Molecule, SEAs)
"""
Find C2 rotations as outlined in the paper DOI: 10.1002/jcc.23493
Uses 'c2a', 'c2b', and 'c2c' to determine the presence of at least
one C2 axis.
"""
len = size(mol)[1]
for sea in SEAs
a = c2a(mol, sea)
if a !== nothing
return a
else
b = c2b(mol, sea)
if b !== nothing
return b
else
if sea.label == "Linear"
for sea2 in SEAs
if sea == sea2
continue
elseif sea2.label == "Linear"
c = c2c(mol, sea, sea2)
if c !== nothing
return c
end
end
end
end
end
end
end
return nothing
end
function is_there_ortho_c2(mol::Molecule, cn_axis::Vector{T}, SEAs) where T
"""
Finds C2 axes orthogonal to principal axis 'paxis' of Molecule
When passing a principal axis to 'c2a', 'c2b', and 'c2c', returns
true/false if there exists at least one C2 orthogonal to principal axis
"""
len = size(mol)[1]
for sea in SEAs
if c2a(mol, cn_axis, sea)
return true
elseif c2b(mol, cn_axis, sea)
return true
elseif sea.label == "Linear"
for sea2 in SEAs
if sea == sea2
continue
elseif sea2.label == "Linear"
if c2c(mol, cn_axis, sea, sea2)
return true
end
end
end
end
end
return false
end
function num_C2(mol::Molecule, SEAs)
"""
Counts number of unique C2 axes. Particularly useful for
finding high symmetry groups. Uses C2 algorithms a and b
but unlike previous functions, finds all unique C2 axes and
returns the count.
"""
axes = []
for sea in SEAs
a = all_c2a(mol, sea)
if a !== nothing
for i in a
push!(axes, i)
end
end
b = all_c2b(mol, sea)
if b !== nothing
for i in b
push!(axes, i)
end
end
end
if size(axes)[1] < 1
return nothing
end
unique_axes = [axes[1]]
for i in axes
check = true
for j in unique_axes
if issame_axis(i,j)
check = false
break
end
end
if check
push!(unique_axes, i)
end
end
return size(unique_axes)[1]
end
function c2a(mol, sea)
"""
Constructs a line going from the COM to the midpoint of every pair
of atoms in a set of SEAs (assuming that midpoint is not the COM).
That line is a potential C2 axis
"""
len = size(sea.set)[1]
for i = 1:len-1, j = i+1:len
midpoint = mol[sea.set[i]].xyz + mol[sea.set[j]].xyz
if midpoint == [0.0, 0.0, 0.0]
continue
else
c2 = Molecules.Cn(midpoint, 2)
molB = Molecules.transform(mol, c2)
check = Molecules.isequivalent(mol, molB)
if check
return midpoint
else
continue
end
end
end
return nothing
end
function c2b(mol, sea)
"""
Constructs a line from the COM to each atom in a set of SEAs.
That line is a potential C2 axis.
"""
len = size(sea.set)[1]
for i = 1:len
c2_axis = mol[sea.set[i]].xyz
c2 = Molecules.Cn(c2_axis, 2)
molB = Molecules.transform(mol, c2)
check = Molecules.isequivalent(mol, molB)
if check
return c2_axis
else
continue
end
end
return nothing
end
function c2c(mol, sea1, sea2)
"""
Given two linear sets of SEAs (ij and kl), a potential C2 axis
is the vector r_C2 = (i-j) x (k-l)
"""
rij = mol[sea1.set[1]].xyz - mol[sea1.set[2]].xyz
rkl = mol[sea2.set[1]].xyz - mol[sea2.set[2]].xyz
c2_axis = cross(rij, rkl)
c2 = Molecules.Cn(c2_axis, 2)
molB = Molecules.transform(mol, c2)
if Molecules.isequivalent(mol, molB)
return c2_axis
else
return nothing
end
end
function c2a(mol, cn_axis, sea)
len = size(sea.set)[1]
for i = 1:len-1, j = i+1:len
midpoint = mol[sea.set[i]].xyz + mol[sea.set[j]].xyz
if midpoint == [0.0, 0.0, 0.0]
continue
elseif issame_axis(midpoint, cn_axis)
continue
else
c2 = Molecules.Cn(midpoint, 2)
molB = Molecules.transform(mol, c2)
check = Molecules.isequivalent(mol, molB)
if check
return true
else
continue
end
end
end
return false
end
function c2b(mol, cn_axis, sea)
len = size(sea.set)[1]
for i = 1:len
c2_axis = mol[sea.set[i]].xyz
if issame_axis(c2_axis, cn_axis)
continue
else
c2 = Molecules.Cn(c2_axis, 2)
molB = Molecules.transform(mol, c2)
check = Molecules.isequivalent(mol, molB)
if check
return true
else
continue
end
end
end
return false
end
function c2c(mol, cn_axis, sea1, sea2)
rij = mol[sea1.set[1]].xyz - mol[sea1.set[2]].xyz
rkl = mol[sea2.set[1]].xyz - mol[sea2.set[2]].xyz
c2_axis = cross(rij, rkl)
if issame_axis(c2_axis, cn_axis)
return false
else
c2 = Molecules.Cn(c2_axis, 2)
molB = Molecules.transform(mol, c2)
return Molecules.isequivalent(mol, molB)
end
end
function all_c2a(mol, sea)
out = []
len = size(sea.set)[1]
for i = 1:len-1, j = i+1:len
midpoint = mol[sea.set[i]].xyz + mol[sea.set[j]].xyz
if midpoint == [0.0, 0.0, 0.0]
continue
else
c2 = Molecules.Cn(midpoint, 2)
molB = Molecules.transform(mol, c2)
check = Molecules.isequivalent(mol, molB)
if check
push!(out, midpoint)
else
continue
end
end
end
if size(out)[1] < 1
return nothing
end
return out
end
function all_c2b(mol, sea)
out = []
len = size(sea.set)[1]
for i = 1:len
c2_axis = mol[sea.set[i]].xyz
c2 = Molecules.Cn(c2_axis, 2)
molB = Molecules.transform(mol, c2)
check = Molecules.isequivalent(mol, molB)
if check
push!(out, c2_axis)
else
continue
end
end
if size(out)[1] < 1
return nothing
end
return out
end
function highest_ordered_axis(rotations::Vector{Any})
# Returns the largest element of the set of rotation orders
len = size(rotations)[1]
ns = []
for i = 1:len
push!(ns, rotations[i].order)
end
return sort(ns)[len]
end
function is_there_sigmah(mol::Molecule, paxis)
"""
Checks for reflection across plane with normal vector
equal to principal rotation axis
"""
σh = Molecules.reflection_matrix(paxis)
molB = Molecules.transform(mol, σh)
return Molecules.isequivalent(mol, molB)
end
function is_there_sigmav(mol, SEAs, paxis)
"""
Checks for the presence of reflection planes that
contain the principal c2_axis
"""
axes = []
for sea in SEAs
len = size(sea.set,1)
if len < 2
continue
end
A = sea.set[1]
for i = 2:len
B = sea.set[i]
#n = normalize(mol[A].xyz - mol[B].xyz)
n = mol[A].xyz - mol[B].xyz
σ = Molecules.reflection_matrix(n)
molB = Molecules.transform(mol, σ)
check = Molecules.isequivalent(mol, molB)
if check
push!(axes, n)
else
continue
end
end
end
if size(axes,1) < 1
if mol_is_planar(mol)
return true
else
return false
end
end
unique_axes = [axes[1]]
# This part is VERY inefficient!!!
for i in axes
check = true
for j in unique_axes
if issame_axis(i,j)
check = false
break
end
end
if check
push!(unique_axes, i)
end
end
for i in unique_axes
if issame_axis(i, paxis)
continue
else
return true
end
end
return false
end
function mol_is_planar(mol)
"""
Checks if Molecule is planar
"""
t = eltype(mol[1].xyz)
len = size(mol,1)
mat = zeros(Float64, (3,len))
for i = 1:len
mat[:,i] = mol[i].xyz
end
rank = LinearAlgebra.rank(mat)
if rank < 3
return true
end
return false
end |
proofpile-julia0005-42748 | {
"provenance": "014.jsonl.gz:242749"
} | import Base.convert
using MAT
using ForwardDiff
using Optim
using Polynomials
using JLD
using ROCAnalysis
using PyPlot
@everywhere path="/home/emma/github/fit_behaviour_julia/"
path_functions=path*"functions/"
path_save=path*"results/"
include(path_functions*"parameter_functions.jl")
include(path_functions*"functions_julia.jl")
include(path_functions*"simulations.jl")
Nstim=7000
Nframes=10
Tframe=.2
T=Tframe*Nframes
tau=1 #### this is always 1 in the theory.
dt_sim=tau/100.
NT=T/dt_sim
NTframes=NT/Nframes
sigma_a=.3
# model_f=Dict(:c2_f=>c2_urgency,:c4_f=>c4_const,:st_f=>st_logistic_time)
# param=Dict("c2"=>[0.7,0.05], "st"=>[2.0,0.5,0.5],"c4"=>[1.0],"sigma_a"=>sigma_a)
# model_num="2"
# model_f=Dict(:c2_f=>c2_urgency,:c4_f=>c4_const,:st_f=>st_logistic_time,:ro_f=>read_out_soft_sim)
# param=Dict("c2"=>[0.7,0.05], "st"=>[2.0,0.5,0.05],"c4"=>[1.0],"sigma_a"=>sigma_a,"ro"=>[3,0.3])
# model_num="3"
# model_f=Dict(:c2_f=>c2_urgency,:c4_f=>c4_const,:c6_f=>c6_const,:st_f=>st_const,:ro_f=>read_out_TW_sim)
# param=Dict("c2"=>[-0.025,1],"c4"=>[-0.5],"c6"=>[0.4], "st"=>[1.],"sigma_a"=>sigma_a)
model_num="6"
model_f=Dict(:c2_f=>c2_urgency,:c4_f=>c4_const,:c6_f=>c6_zeros,:bias_d_f=>bias_d_const,
:hbias_d_f=>history_bias_d_const,:st_f=>st_logistic_bias2,:x0_f=>initial_condition_bias_hbias,
:ro_f=>read_out_perfect_sim)
param=Dict("c2"=>[1,0],"c4"=>[1],"x0"=>[0.0,0.0], "st"=>[2.,2,0.,0.],"bias_d"=>[0.],
"hbias_d"=>[0.0],"sigma_a"=>sigma_a)
Nsigmas=5
sigma_max=1
sigmas=exp10.(linspace(-1.17,0.,5))
stim=randn(Nstim,Nframes)
stim_test=randn(Nstim,Nframes)
internal_noise=sigma_a*randn(Nstim,Int(NT))
internal_noise_test=sigma_a*randn(Nstim,Int(NT))
stim_trans=zeros(Nstim,Nframes)
s=zeros(Nframes)
for istim in 1:Nstim
stim[istim,:]=sigmas[(istim-1)%Nsigmas+1]*stim[istim,:]
stim_test[istim,:]=sigmas[(istim-1)%Nsigmas+1]*stim_test[istim,:]
end
for istim in 1:Nstim
# stim_trans[istim,:]=st_logistic_time(stim[istim,:],stim_trans[istim,:],param["st"])
#st_logistic_time(stim[istim,:],s,param["st"])
model_f[:st_f](stim[istim,:],s,param["st"])
stim_trans[istim,:]=s
end
figure()
plot(stim[:],stim_trans[:],".")
#
#
d,xfinal=simulation_Nframe_general(param,stim,internal_noise,T,tau,model_f)
d_test,_=simulation_Nframe_general(param,stim_test,internal_noise_test,T,tau,model_f)
println("hola ",mean(0.5*(d+1)),mean(.5*(1+sign(xfinal))))
for isigma in 1:Nsigmas
indices=isigma:Nsigmas:Nstim
end
#######Computing PR############
Ntrials=1000
Nstim_PR=100
d_pr=zeros(Ntrials,Nstim_PR)
d_pr_test=zeros(Ntrials,Nstim_PR)
x_pr=zeros(Ntrials)
for itrial in 1:Ntrials
internal_noise=sigma_a*randn(Nstim_PR,Int(Nframes*NTframes))
d_pr[itrial,:],x_pr=simulation_Nframe_general(param,stim[1:Nstim_PR,:],internal_noise,T,tau, model_f)
internal_noise=sigma_a*randn(Nstim_PR,Int(Nframes*NTframes))
d_pr_test[itrial,:],_=simulation_Nframe_general(param,stim_test[1:Nstim_PR,:],internal_noise,T,tau,model_f)
end
####compute trace ####
N_traces=20
istim_pdf=2
internal_noise=sigma_a*randn(N_traces,Int(Nframes*NTframes))
stim_pdf=transpose(repeat(stim[istim_pdf,:],1,N_traces))
d_traces,x=simulation_Nframe_general_trace(param,stim_pdf,internal_noise,T,tau, model_f)
PR_sim=zeros(Float64,Nstim_PR)
PR_sim_test=zeros(Float64,Nstim_PR)
for istim in 1:Nstim_PR
PR_sim[istim]= mean( 0.5.*(d_pr[:,istim]+1))
PR_sim_test[istim]= mean( 0.5.*(d_pr_test[:,istim]+1))
end
# path="/home/genis/model_fitting_julia/"
# filename="synthetic_data_DW_Nstim"*string(Nstim)*"_5sigmas_test_largeParam.jld"
filename="synthetic_data_DW_Nstim"*string(Nstim)*"_model"*model_num*".jld"
save_dict=Dict("d"=>d,"stim"=>stim,"param"=>param,"sigma_a"=>sigma_a,"Tframe"=>T,
"PR"=>PR_sim,"kernel"=>kernel_sigma, "PR_sim_test"=>PR_sim_test, "stim_test"=>stim_test,
"d_test"=>d_test,"Tframe"=>Tframe,"x_trace"=>x,"istim_pdf"=>istim_pdf)
JLD.save(path_save*filename,save_dict)
|