diff --git a/.github/workflows/SpellCheck.yml b/.github/workflows/SpellCheck.yml
new file mode 100644
index 0000000000..0decc88b49
--- /dev/null
+++ b/.github/workflows/SpellCheck.yml
@@ -0,0 +1,13 @@
+name: Spell Check
+
+on: [pull_request]
+
+jobs:
+  typos-check:
+    name: Spell Check with Typos
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout Actions Repository
+        uses: actions/checkout@v4
+      - name: Check spelling
+        uses: crate-ci/typos@v1.17.0
\ No newline at end of file
diff --git a/README.md b/README.md
index 8ae6f0f6e8..928ff38715 100644
--- a/README.md
+++ b/README.md
@@ -37,9 +37,9 @@ the documentation, which contains the unreleased features.
   - Integrated logging suite for handling connections to TensorBoard
   - Handling of (partial) integro-differential equations and various stochastic equations
   - Specialized forms for solving `ODEProblem`s with neural networks
-  - Compatability with [Flux.jl](https://fluxml.ai/) and [Lux.jl](https://lux.csail.mit.edu/)
+  - Compatibility with [Flux.jl](https://fluxml.ai/) and [Lux.jl](https://lux.csail.mit.edu/)
     for all of the GPU-powered machine learning layers available from those libraries.
-  - Compatability with [NeuralOperators.jl](https://docs.sciml.ai/NeuralOperators/stable/) for
+  - Compatibility with [NeuralOperators.jl](https://docs.sciml.ai/NeuralOperators/stable/) for
     mixing DeepONets and other neural operators (Fourier Neural Operators, Graph Neural Operators,
     etc.) with physics-informed loss functions
diff --git a/docs/src/index.md b/docs/src/index.md
index 08960823e8..2b03424c10 100644
--- a/docs/src/index.md
+++ b/docs/src/index.md
@@ -88,7 +88,7 @@ the precision of the arguments are correct, and anything that requires alternati
 Lux.jl has none of these issues, is simpler to work with due to the parameters in its function calls being explicit rather than implicit global references, and achieves higher performance. It is built on the same foundations as Flux.jl, such as Zygote and NNLib, and thus it supports the
-same layers underneith and calls the same kernels. The better performance comes from not having the overhead of `restructure` required.
+same layers underneath and calls the same kernels. The better performance comes from not having the overhead of `restructure` required.
 Thus we highly recommend people use Lux instead and only use the Flux fallbacks for legacy code.
 
 ## Reproducibility
diff --git a/docs/src/tutorials/Lotka_Volterra_BPINNs.md b/docs/src/tutorials/Lotka_Volterra_BPINNs.md
index cbbfe3d4db..b21d6685e5 100644
--- a/docs/src/tutorials/Lotka_Volterra_BPINNs.md
+++ b/docs/src/tutorials/Lotka_Volterra_BPINNs.md
@@ -94,7 +94,7 @@ sol_pestim = solve(prob, alg; saveat = dt)
 nothing #hide
 ```
 
-The solution for the ODE is retured as a nested vector `sol_flux_pestim.ensemblesol`. Here, [$x$ , $y$] would be returned.
+The solution for the ODE is returned as a nested vector `sol_flux_pestim.ensemblesol`. Here, [$x$ , $y$] would be returned.
 
 ```@example bpinn
 # plotting solution for x,y for chain
diff --git a/docs/src/tutorials/neural_adapter.md b/docs/src/tutorials/neural_adapter.md
index db4ac9574b..762a5108f6 100644
--- a/docs/src/tutorials/neural_adapter.md
+++ b/docs/src/tutorials/neural_adapter.md
@@ -104,7 +104,7 @@ plot(p1, p2, p3, p4, p5)
 ## Domain decomposition
 
 In this example, we first obtain a prediction of 2D Poisson equation on subdomains. We split up full domain into 10 sub problems by x, and create separate neural networks for each sub interval.
 If x domain ∈ [x_0, x_end] so, it is decomposed on 10 part: sub x domains = {[x_0, x_1], ... [x_i,x_i+1], ..., x_9,x_end]}.
-And then using the method neural_adapter, we retrain the banch of 10 predictions to the one prediction for full domain of task.
+And then using the method neural_adapter, we retrain the batch of 10 predictions to the one prediction for full domain of task.
 
 ![domain_decomposition](https://user-images.githubusercontent.com/12683885/127149752-a4ecea50-2984-45d8-b0d4-d2eadecf58e7.png)
diff --git a/src/BPINN_ode.jl b/src/BPINN_ode.jl
index d91dd4b96a..4493a07326 100644
--- a/src/BPINN_ode.jl
+++ b/src/BPINN_ode.jl
@@ -146,7 +146,7 @@ end
 BPINN Solution contains the original solution from AdvancedHMC.jl sampling(BPINNstats contains fields related to that)
 > ensemblesol is the Probabilistic Estimate(MonteCarloMeasurements.jl Particles type) of Ensemble solution from All Neural Network's(made using all sampled parameters) output's.
 > estimated_nn_params - Probabilistic Estimate of NN params from sampled weights,biases
-> estimated_de_params - Probabilistic Estimate of DE params from sampled unknown DE paramters
+> estimated_de_params - Probabilistic Estimate of DE params from sampled unknown DE parameters
 """
 struct BPINNsolution{O <: BPINNstats, E, NP, OP, P}
     original::O
@@ -224,7 +224,7 @@ function DiffEqBase.__solve(prob::DiffEqBase.ODEProblem,
         throw(error("Only Lux.AbstractExplicitLayer neural networks are supported"))
     end
 
-    # contructing ensemble predictions
+    # constructing ensemble predictions
     ensemblecurves = Vector{}[]
     # check if NN output is more than 1
     numoutput = size(luxar[1])[1]
diff --git a/src/PDE_BPINN.jl b/src/PDE_BPINN.jl
index 344d007963..02fc3d30cc 100644
--- a/src/PDE_BPINN.jl
+++ b/src/PDE_BPINN.jl
@@ -111,9 +111,9 @@ function L2LossData(Tar::PDELogTargetDensity, θ)
     dataset = Tar.dataset
     sumt = 0
     L2stds = Tar.allstd[3]
-    # each dep var has a diff dataset depending on its indep var and thier domains
+    # each dep var has a diff dataset depending on its indep var and their domains
     # these datasets are matrices of first col-dep var and remaining cols-all indep var
-    # Tar.init_params is needed to contruct a vector of parameters into a ComponentVector
+    # Tar.init_params is needed to construct a vector of parameters into a ComponentVector
     # dataset of form Vector[matrix_x, matrix_y, matrix_z]
     # matrix_i is of form [i,indvar1,indvar2,..] (needed in case if heterogenous domains)
@@ -279,7 +279,7 @@ end
 * `Adaptorkwargs`: `Adaptor`, `Metric`, `targetacceptancerate`. Refer: https://turinglang.org/AdvancedHMC.jl/stable/
     Note: Target percentage(in decimal) of iterations in which the proposals are accepted (0.8 by default).
 * `Integratorkwargs`: `Integrator`, `jitter_rate`, `tempering_rate`. Refer: https://turinglang.org/AdvancedHMC.jl/stable/
-* `saveats`: Grid spacing for each independant variable for evaluation of ensemble solution, estimated parameters.
+* `saveats`: Grid spacing for each independent variable for evaluation of ensemble solution, estimated parameters.
 * `numensemble`: Number of last samples to take for creation of ensemble solution, estimated parameters.
 * `progress`: controls whether to show the progress meter or not.
 * `verbose`: controls the verbosity. (Sample call args in AHMC).
@@ -323,7 +323,7 @@ function ahmc_bayesian_pinn_pde(pde_system, discretization;
     chain = discretization.chain
 
     if length(pinnrep.domains) != length(saveats)
-        throw(error("Number of independant variables must match saveat inference discretization steps"))
+        throw(error("Number of independent variables must match saveat inference discretization steps"))
     end
 
     # NN solutions for loglikelihood which is used for L2lossdata
diff --git a/src/advancedHMC_MCMC.jl b/src/advancedHMC_MCMC.jl
index 9dd22cceb2..348ed57a0d 100644
--- a/src/advancedHMC_MCMC.jl
+++ b/src/advancedHMC_MCMC.jl
@@ -399,7 +399,7 @@ Incase you are only solving the Equations for solution, do not provide dataset
 ## Keyword Arguments
 
 * `strategy`: The training strategy used to choose the points for the evaluations. By default GridTraining is used with given physdt discretization.
-* `init_params`: intial parameter values for BPINN (ideally for multiple chains different initializations preferred)
+* `init_params`: initial parameter values for BPINN (ideally for multiple chains different initializations preferred)
 * `nchains`: number of chains you want to sample
 * `draw_samples`: number of samples to be drawn in the MCMC algorithms (warmup samples are ~2/3 of draw samples)
 * `l2std`: standard deviation of BPINN prediction against L2 losses/Dataset
@@ -408,7 +408,7 @@ Incase you are only solving the Equations for solution, do not provide dataset
 * `param`: Vector of chosen ODE parameters Distributions in case of Inverse problems.
 * `autodiff`: Boolean Value for choice of Derivative Backend(default is numerical)
 * `physdt`: Timestep for approximating ODE in it's Time domain. (1/20.0 by default)
-* `Kernel`: Choice of MCMC Sampling Algorithm (AdvancedHMC.jl implemenations HMC/NUTS/HMCDA)
+* `Kernel`: Choice of MCMC Sampling Algorithm (AdvancedHMC.jl implementations HMC/NUTS/HMCDA)
 * `Integratorkwargs`: `Integrator`, `jitter_rate`, `tempering_rate`. Refer: https://turinglang.org/AdvancedHMC.jl/stable/
 * `Adaptorkwargs`: `Adaptor`, `Metric`, `targetacceptancerate`. Refer: https://turinglang.org/AdvancedHMC.jl/stable/
     Note: Target percentage(in decimal) of iterations in which the proposals are accepted (0.8 by default)
diff --git a/src/pinn_types.jl b/src/pinn_types.jl
index 3ad4e8d91a..e78c0da089 100644
--- a/src/pinn_types.jl
+++ b/src/pinn_types.jl
@@ -15,13 +15,13 @@ struct LogOptions
     end
 end
 
-"""This function is defined here as stubs to be overriden by the subpackage NeuralPDELogging if imported"""
+"""This function is defined here as stubs to be overridden by the subpackage NeuralPDELogging if imported"""
 function logvector(logger, v::AbstractVector{R}, name::AbstractString,
         step::Integer) where {R <: Real}
     nothing
 end
 
-"""This function is defined here as stubs to be overriden by the subpackage NeuralPDELogging if imported"""
+"""This function is defined here as stubs to be overridden by the subpackage NeuralPDELogging if imported"""
 function logscalar(logger, s::R, name::AbstractString, step::Integer) where {R <: Real}
     nothing
 end
@@ -187,7 +187,7 @@ methodology.
 * `Dataset`: A vector of matrix, each matrix for ith dependant variable and first col in matrix is for dependant variables,
-  remaining coloumns for independant variables. Needed for inverse problem solving.
+  remaining columns for independent variables. Needed for inverse problem solving.
 * `init_params`: the initial parameters of the neural networks. If `init_params` is not given, then the neural network default parameters are used.
   Note that for Lux, the default will convert to Float64.
diff --git a/src/training_strategies.jl b/src/training_strategies.jl
index a419afcdbf..5739ac4797 100644
--- a/src/training_strategies.jl
+++ b/src/training_strategies.jl
@@ -23,7 +23,7 @@ function merge_strategy_with_loglikelihood_function(pinnrep::PINNRepresentation,
     eltypeθ = eltype(pinnrep.flat_init_params)
 
-    # is vec as later each _set in pde_train_sets are coloumns as points transformed to vector of points (pde_train_sets must be rowwise)
+    # is vec as later each _set in pde_train_sets are columns as points transformed to vector of points (pde_train_sets must be rowwise)
     pde_loss_functions = if !(train_sets_pde isa Nothing)
         pde_train_sets = [train_set[:, 2:end] for train_set in train_sets_pde]
         pde_train_sets = adapt.(parameterless_type(ComponentArrays.getdata(flat_init_params)),
diff --git a/test/IDE_tests.jl b/test/IDE_tests.jl
index d9aa9eb3cf..7fcb15604a 100644
--- a/test/IDE_tests.jl
+++ b/test/IDE_tests.jl
@@ -59,7 +59,7 @@ eq = Ix(u(x) * cos(x)) ~ (x^3) / 3
     @test Flux.mse(u_real, u_predict) < 0.001
 end
 
-@testset "Example 3 - 2 Inputs, 1 Ouput" begin
+@testset "Example 3 - 2 Inputs, 1 Output" begin
     @parameters x, y
     @variables u(..)
     Dx = Differential(x)
@@ -82,7 +82,7 @@ end
     @test Flux.mse(u_real, u_predict) < 0.001
 end
 
-@testset "Example 4 - 2 Inputs, 1 Ouput" begin
+@testset "Example 4 - 2 Inputs, 1 Output" begin
     @parameters x, y
     @variables u(..)
     Dx = Differential(x)