Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 14 additions & 2 deletions .github/workflows/benchmarks.yml
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,10 @@ concurrency:
cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }}

permissions:
contents: read
actions: write
contents: write
pull-requests: read
statuses: write

jobs:
test:
Expand Down Expand Up @@ -56,7 +59,7 @@ jobs:
- name: "Set up Julia"
uses: julia-actions/setup-julia@v2
with:
ref: v${{ matrix.julia-version }}
version: v${{ matrix.julia-version }}
- name: "Set up Python"
uses: actions/setup-python@v5
with:
Expand Down Expand Up @@ -94,3 +97,12 @@ jobs:
make gh_action_benchmarks.html
- name: "Print benchmark data"
run: cat gh_action_benchmarks.csv
- uses: julia-actions/cache@v2
- name: "Build site"
run: |
julia --color=yes --project=docs -e 'using Pkg; Pkg.instantiate()'
julia --color=yes --project=docs --compiled-modules=existing docs/make.jl
- name: "Deploy site"
uses: julia-actions/julia-docdeploy@latest
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
4 changes: 2 additions & 2 deletions Project.toml
Original file line number Diff line number Diff line change
Expand Up @@ -4,13 +4,13 @@ authors = ["Viral B. Shah <ViralBShah@users.noreply.github.com>"]
version = "0.1.0"

[deps]
Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4"
CSV = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b"
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
Gadfly = "c91e804a-d5a3-530f-b6f0-dfbca275c004"
StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"

[compat]
Documenter = "1"
CSV = "0.10.15"
DataFrames = "1.7.0"
Gadfly = "1.4.1"
StatsBase = "0.34.5"
13 changes: 13 additions & 0 deletions docs/Project.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
[deps]
CSV = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b"
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4"
Gadfly = "c91e804a-d5a3-530f-b6f0-dfbca275c004"
StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"

[compat]
CSV = "0.10.15"
DataFrames = "1.7.0"
Documenter = "1"
Gadfly = "1.4.1"
StatsBase = "0.34.5"
83 changes: 83 additions & 0 deletions docs/make.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,83 @@
using Documenter, DataFrames, CSV, StatsBase, Gadfly

"""
    makeplot(benchfile::String, outfile::String="docs/src/benchmarks.svg")

Read per-language benchmark timings from the CSV file `benchfile`
(rows of `language,benchmark,time`, no header), normalize every time by
the corresponding C time, and draw a log-scale Gadfly plot of the
normalized results to the SVG file `outfile`.

Languages are ordered C first, Julia second, then the rest by ascending
geometric mean of their normalized times.
"""
function makeplot(benchfile::String, outfile::String="docs/src/benchmarks.svg")
    # Load benchmark data from file
    benchmarks = CSV.read(benchfile, DataFrame; header=["language", "benchmark", "time"])

    # Capitalize and decorate language names from datafile
    dict = Dict("c"=>"C", "julia"=>"Julia", "lua"=>"LuaJIT", "fortran"=>"Fortran", "java"=>"Java",
                "javascript"=>"JavaScript", "matlab"=>"Matlab", "mathematica"=>"Mathematica",
                "python"=>"Python", "octave"=>"Octave", "r"=>"R", "rust"=>"Rust", "go"=>"Go")
    benchmarks[!,:language] = [dict[lang] for lang in benchmarks[!,:language]]

    # Normalize benchmark times by C times: join each row with the C row
    # for the same benchmark, then divide.
    ctime = benchmarks[benchmarks[!,:language] .== "C", :]
    benchmarks = innerjoin(benchmarks, ctime, on=:benchmark, makeunique=true)
    select!(benchmarks, Not([:language_1]))
    rename!(benchmarks, :time_1 => :ctime)
    benchmarks[!,:normtime] = benchmarks[!,:time] ./ benchmarks[!,:ctime]

    # Compute the geometric mean for each language.
    # Typed accumulators — bare `[]` would be Vector{Any} and give the
    # resulting DataFrame abstractly-typed columns.
    langs = String[]
    means = Float64[]
    priorities = Int[]
    for lang in unique(benchmarks[!,:language])
        data = benchmarks[benchmarks[!,:language] .== lang, :]
        push!(langs, lang)
        push!(means, geomean(data[!,:normtime]))
        # Sort priority: C first, Julia second, everything else after.
        if lang == "C"
            push!(priorities, 1)
        elseif lang == "Julia"
            push!(priorities, 2)
        else
            push!(priorities, 3)
        end
    end

    # Add the geometric means back into the benchmarks dataframe
    langmean = DataFrame(language=langs, geomean=means, priority=priorities)
    benchmarks = innerjoin(benchmarks, langmean, on=:language)

    # Put C first, Julia second, and sort the rest by geometric mean
    sort!(benchmarks, [:priority, :geomean])
    sort!(langmean, [:priority, :geomean])

    p = plot(benchmarks,
        x = :language,
        y = :normtime,
        color = :benchmark,
        Scale.y_log10,
        Guide.ylabel(nothing),
        Guide.xlabel(nothing),
        Coord.Cartesian(ymin=-0.5),
        Theme(
            guide_title_position = :left,
            colorkey_swatch_shape = :circle,
            minor_label_font = "Georgia",
            major_label_font = "Georgia"
        ),
    )

    # Render at a golden-ratio aspect so the plot isn't cramped.
    golden = MathConstants.golden
    draw(SVG(outfile, 10inch, 10inch/golden), p)
    return nothing
end

# Generate the benchmark plot from the CSV produced by the CI benchmark run
# (the workflow runs `make gh_action_benchmarks.csv` before building the docs);
# the SVG lands in docs/src/ so index.md can embed it.
makeplot("gh_action_benchmarks.csv")

# Build the Documenter site with the two pages under docs/src/.
makedocs(
format = Documenter.HTML(),
sitename = "Julia Microbenchmarks",
pages = [
"Microbenchmarks" => "index.md",
"Notes" => "notes.md",
],
)

# Push the built site to the repository's gh-pages branch.
# deps/make are `nothing` because the plot was already generated above;
# push_preview=true additionally deploys previews for pull requests.
deploydocs(
repo = "github.com/JuliaLang/Microbenchmarks.jl.git",
target = "build",
deps = nothing,
make = nothing,
push_preview = true,
)
12 changes: 12 additions & 0 deletions docs/src/index.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
# Julia Microbenchmarks

These micro-benchmarks, while not comprehensive, do test compiler
performance on a range of common code patterns, such as function
calls, string parsing, sorting, numerical loops, random number
generation, recursion, and array operations.

![Benchmark results](benchmarks.svg)

These micro-benchmark results were obtained on a single core (serial
execution) on [GitHub
Actions](https://github.com/JuliaLang/Microbenchmarks.jl/actions).
19 changes: 19 additions & 0 deletions docs/src/notes.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
# Notes

The Julia results depicted above do not include compile time.

It is important to note that the benchmark codes are not written for
absolute maximal performance.

For example, the fastest code to compute `recursion_fibonacci(20)` is
the constant literal `6765`. Instead, the benchmarks are written to
test the performance of identical algorithms and code patterns
implemented in each language. The Fibonacci benchmarks all use the
same (inefficient) doubly-recursive algorithm, and the pi summation
benchmarks use the same for-loop.

The “algorithm” for matrix multiplication is to call the most obvious
built-in/standard random-number and matmul routines (or to directly
call BLAS if the language does not provide a high-level matmul),
except where a matmul/BLAS call is not possible (such as in
JavaScript).
4 changes: 2 additions & 2 deletions lua/lua-install.sh
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
#!/bin/sh
# Install lua-sci-lang as recommended via ulua

wget https://ulua.io/download/ulua~latest.zip
unzip ulua~latest.zip
wget -q https://ulua.io/download/ulua~latest.zip
unzip -q ulua~latest.zip
sed -i 's/noconfirm = false,/noconfirm = true,/g' ulua/host/config.lua
ulua/bin/upkg add time
ulua/bin/upkg add sci
Expand Down
24 changes: 0 additions & 24 deletions src/Microbenchmarks.jl
Original file line number Diff line number Diff line change
Expand Up @@ -48,28 +48,4 @@ function read_bench(benchfile::String)
return benchmarks
end

benchmarks = read_bench("benchmarks.csv")

#=
p = plot(benchmarks,
x = :language,
y = :normtime,
color = :benchmark,
Scale.y_log10,
Guide.ylabel(nothing),
Guide.xlabel(nothing),
Coord.Cartesian(ymin=-0.5),
Theme(
guide_title_position = :left,
colorkey_swatch_shape = :circle,
minor_label_font = "Georgia",
major_label_font = "Georgia"
),
)

golden = MathConstants.golden
#draw(SVG(8inch,8inch/golden), p)
draw(SVG("benchmarks.svg", 10inch, 10inch/golden), p)
=#

end #module
Loading