Dorian Stoll / HPC Benchmark Game / Commits

Commit 5298883a (Verified)
Authored 8 months ago by Dorian Stoll

validation: Add rodinia-srad

Parent: 3b406c28
Showing 2 changed files with 159 additions and 1 deletion:

  Dockerfile                  +2   −1
  validation/rodinia-srad.py  +157 −0
Dockerfile +2 −1

@@ -79,7 +79,7 @@ RUN julia --project="src/benchmarks/nas-ft/julia" -e "using Pkg; Pkg.instantiate
 # Install Julia benchmarks
 RUN ln -s "../src/benchmarks/ca/julia/run.sh" "bin/ca-julia"
-RUN ln .s "../src/benchmarks/rodinia-srad/julia/run.sh" "bin/rodinia-srad-julia"
+RUN ln -s "../src/benchmarks/rodinia-srad/julia/run.sh" "bin/rodinia-srad-julia"
 # Install measurement scripts
 RUN ln -s "../measure/ca.sh" "bin/measure-ca"

@@ -93,3 +93,4 @@ RUN ln -s /output output
 # Validate the benchmarks
 RUN python3 validation/ca.py
+RUN python3 validation/rodinia-srad.py
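
Because main() in the script below returns nonzero when any port's output diverges from the C reference and is wired through sys.exit, this new RUN step turns a validation failure into a failed image build. A minimal sketch of that contract, invoking the validator the same way the Dockerfile does (illustrative only, not part of the repository):

import subprocess

# Mirrors the Dockerfile step; docker build aborts on a nonzero exit code
proc = subprocess.run(["python3", "validation/rodinia-srad.py"])
print("validation passed" if proc.returncode == 0 else "validation failed")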
validation/rodinia-srad.py (new file, mode 0 → 100644) +157 −0
#!/usr/bin/env python3

from __future__ import annotations

import math
import subprocess
import sys
from dataclasses import dataclass
from subprocess import CompletedProcess

BENCHMARKS: list[str] = [
    "rodinia-srad-c-gcc",
    "rodinia-srad-cpp-gcc",
    "rodinia-srad-fortran-gcc",
    "rodinia-srad-c-clang",
    "rodinia-srad-cpp-clang",
    "rodinia-srad-fortran-llvm-flang",
    "rodinia-srad-julia",
]

REFERENCE: str = "rodinia-srad-c-gcc"

# Parameters for evaluation
ROWS: int = 2048
COLS: int = 2048
Y1: int = 0
Y2: int = 127
X1: int = 0
X2: int = 127
LAMBDA: float = 0.5
ITERATIONS: int = 2

# How many decimal places are present in the output
DECIMAL_PLACES: int = 5
# The last decimal place may differ by one
TOLERANCE: float = 1 * (10 ** -DECIMAL_PLACES)
@dataclass
class Result(object):
    strings: list[str]
    values: list[float]


@dataclass
class Errors(object):
    absolute: list[float]
    relative: list[float]
def run_benchmark(name: str) -> Result:
    strings: list[str] = []
    values: list[float] = []

    cmd: CompletedProcess[str] = subprocess.run(
        f"{name} {ROWS} {COLS} {Y1} {Y2} {X1} {X2} {LAMBDA} {ITERATIONS}",
        check=True,
        shell=True,
        encoding="utf-8",
        capture_output=True,
    )

    # Expected stdout layout: two header lines, then rows of space-separated
    # floating point values, then a single trailing status line
    lines: list[str] = cmd.stdout.splitlines()
    strings += lines[:2]
    for value in lines[2:-1]:
        values += [float(x) for x in value.strip().split(" ")]
    strings.append(lines[-1])

    return Result(strings, values)
def calculate_error(result: Result, reference: Result) -> Errors:
    absolute: list[float] = []
    relative: list[float] = []

    for res, ref in zip(result.values, reference.values):
        err: float = abs(ref - res)
        absolute.append(err)
        relative.append(err / ref)

    return Errors(absolute, relative)
def check_error(error: float, tolerance: float, decimals: int) -> bool:
    # Compare both sides in integer units of the last printed decimal place
    error = int(math.floor(error * (10 ** decimals)))
    tolerance = int(math.floor(tolerance * (10 ** decimals)))
    return error <= tolerance
def validate_result(result: Result, reference: Result, errors: Errors) -> bool:
    if not len(result.strings) == len(reference.strings):
        return False
    if not len(result.values) == len(reference.values):
        return False

    for res, ref in zip(result.strings, reference.strings):
        if not res == ref:
            return False

    if not all(check_error(x, TOLERANCE, DECIMAL_PLACES) for x in errors.absolute):
        return False

    return True
def main() -> int:
    results: dict[str, Result] = {}
    for bench in BENCHMARKS:
        try:
            results[bench] = run_benchmark(bench)
        except Exception:
            print(f"Failed to run {bench}")
            continue

    if REFERENCE not in results:
        print("The reference program did not run, cannot validate")
        return 1

    allpass: bool = True
    refresult: Result = results[REFERENCE]

    for bench in BENCHMARKS:
        if bench == REFERENCE:
            continue
        # A benchmark that failed to run cannot be validated and counts as a failure
        if bench not in results:
            allpass = False
            continue

        result: Result = results[bench]
        errors: Errors = calculate_error(result, refresult)

        if not validate_result(result, refresult, errors):
            allpass = False
            print(f"The output of {bench} does not match the reference")

        sumabs: float = sum(errors.absolute)
        minabs: float = min(errors.absolute)
        minrel: float = min(errors.relative) * 100
        maxabs: float = max(errors.absolute)
        maxrel: float = max(errors.relative) * 100
        print(f"{bench}: abs: {sumabs:.5f} min: ({minabs:.5f}, {minrel:.5f}%) max: ({maxabs:.5f}, {maxrel:.5f}%)")

    if allpass:
        return 0
    else:
        return 1


if __name__ == "__main__":
    sys.exit(main())
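
To make the tolerance concrete: check_error floors both the observed error and the tolerance into integer counts of the last printed digit, so a port may disagree with the reference by at most one unit in the fifth decimal place. A self-contained sketch of that comparison with made-up values (the numbers are illustrative only, not taken from a real SRAD run):

import math

DECIMAL_PLACES = 5                       # digits the benchmarks print
TOLERANCE = 1 * (10 ** -DECIMAL_PLACES)  # one unit in the last digit

def check_error(error: float, tolerance: float, decimals: int) -> bool:
    # Both sides are floored to integer units of the last decimal place
    error = int(math.floor(error * (10 ** decimals)))
    tolerance = int(math.floor(tolerance * (10 ** decimals)))
    return error <= tolerance

# Off by one unit in the fifth decimal place: accepted
print(check_error(abs(0.12346 - 0.12345), TOLERANCE, DECIMAL_PLACES))  # True
# Off by ten units: rejected
print(check_error(abs(0.12355 - 0.12345), TOLERANCE, DECIMAL_PLACES))  # False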