Skip to content

Commit

Permalink
Merge pull request #427 from eyra/next/hotfix
Browse files Browse the repository at this point in the history
Next/hotfix
  • Loading branch information
mellelieuwes committed Jun 25, 2023
2 parents 9d10bd2 + a7d21f4 commit 4acc657
Show file tree
Hide file tree
Showing 3 changed files with 318 additions and 33 deletions.
43 changes: 42 additions & 1 deletion core/systems/benchmark/_public.ex
Original file line number Diff line number Diff line change
Expand Up @@ -83,9 +83,50 @@ defmodule Systems.Benchmark.Public do
)
end

def import(entries, tool_id) when is_list(entries) do
# Converts one CSV row (a string-keyed map) into an entry map with
# :submission_id, :status, :message and parsed :scores.
defp parse_entry(row) do
  {id_field, rest} = Map.pop(row, "id")
  {status, rest} = Map.pop(rest, "status")
  {message, rest} = Map.pop(rest, "error_message")

  # The "id" column looks like "<submission_id>:<spot>:<description>";
  # only the numeric prefix is needed here. String.to_integer/1 raises
  # if that prefix is not an integer.
  [submission_id | _] = String.split(id_field, ":")

  %{
    submission_id: String.to_integer(submission_id),
    status: status,
    message: message,
    # Every column that was not popped above is treated as a metric score.
    scores: parse_scores(rest)
  }
end

# Parses the remaining CSV columns into score entries.
#
# Each key is a metric (leaderboard) name and each value a stringified
# float. Values that do not fully parse as a float (e.g. "" or text with
# trailing garbage) default to 0.0 — a float, so every score has the
# same type regardless of which branch produced it.
defp parse_scores(%{} = scores) do
  Enum.map(scores, fn {metric, value} ->
    score =
      case Float.parse(value) do
        {score, ""} -> score
        _ -> 0.0
      end

    %{
      name: metric,
      score: score
    }
  end)
end

@doc """
Imports benchmark CSV rows for the tool identified by `tool_id`.

Rows whose "status" column is not "success" are skipped; the remaining
rows are parsed into entries and handed to `import_entries/2`.
"""
def import_csv_lines(csv_lines, tool_id) when is_integer(tool_id) do
  # Resolve the tool first so a bad id fails before any parsing work.
  tool = get_tool!(tool_id)

  entries =
    for line <- csv_lines, Map.get(line, "status") == "success" do
      parse_entry(line)
    end

  Benchmark.Public.import_entries(entries, tool)
end

def import_entries(entries, %Benchmark.ToolModel{} = tool) when is_list(entries) do
names =
entries
|> List.first(%{scores: []})
Expand Down
33 changes: 1 addition & 32 deletions core/systems/benchmark/leaderboard_overview.ex
Original file line number Diff line number Diff line change
Expand Up @@ -7,10 +7,7 @@ defmodule Systems.Benchmark.LeaderboardOverview do

@impl true
def update(%{csv_lines: csv_lines}, %{assigns: %{entity: %{id: tool_id}}} = socket) do
csv_lines
|> Enum.filter(&(Map.get(&1, "status") == "success"))
|> Enum.map(&parse_entry/1)
|> Benchmark.Public.import(tool_id)
Benchmark.Public.import_csv_lines(csv_lines, tool_id)

{
:ok,
Expand Down Expand Up @@ -74,34 +71,6 @@ defmodule Systems.Benchmark.LeaderboardOverview do
assign(socket, forward_button: forward_button)
end

# Turns one CSV row (a string-keyed map) into an entry map.
# Pops the bookkeeping columns first; everything left in `line`
# is treated as metric scores.
defp parse_entry(line) do
  {id, line} = Map.pop(line, "id")
  {status, line} = Map.pop(line, "status")
  {message, line} = Map.pop(line, "error_message")

  # "id" appears to be "<submission_id>:..." — only the numeric prefix
  # is used. String.to_integer/1 raises if the prefix is not an integer.
  submission_id =
    id
    |> String.split(":")
    |> List.first()
    |> String.to_integer()

  %{
    submission_id: submission_id,
    status: status,
    message: message,
    scores: parse_scores(line)
  }
end

# Parses the remaining CSV columns into score entries.
#
# Uses Float.parse/1 rather than String.to_float/1: the latter raises
# ArgumentError for integer-formatted ("0") or empty ("") values, both
# of which occur in uploaded CSVs. Values that do not fully parse as a
# float default to 0.0.
defp parse_scores(%{} = scores) do
  Enum.map(scores, fn {metric, value} ->
    score =
      case Float.parse(value) do
        {parsed, ""} -> parsed
        _ -> 0.0
      end

    %{
      name: metric,
      score: score
    }
  end)
end

@impl true
def render(assigns) do
~H"""
Expand Down
275 changes: 275 additions & 0 deletions core/test/systems/benchmark/_public_test.exs
Original file line number Diff line number Diff line change
@@ -0,0 +1,275 @@
defmodule Systems.Benchmark.PublicTest do
  use Core.DataCase

  alias Systems.{
    Benchmark
  }

  # Note: import_csv_lines has arity 2 (csv_lines, tool_id); test names
  # reflect that. Variable `tool_id` fixed from the `toold_id` typo.

  test "import_csv_lines/2 one line" do
    %{id: tool_id} = tool = create_tool()
    %{name: name} = spot = create_spot(tool)
    %{id: submission_id, description: description} = create_submission(spot)

    cat1 = "aap"
    cat2 = "noot"
    cat3 = "mies"

    cat1_score = 0.1
    cat2_score = 0.2
    cat3_score = 0.3

    csv_line = %{
      "id" => "#{submission_id}:#{name}:#{description}",
      "status" => "success",
      "error_message" => "",
      cat1 => "#{cat1_score}",
      cat2 => "#{cat2_score}",
      cat3 => "#{cat3_score}"
    }

    {:ok, result} = Benchmark.Public.import_csv_lines([csv_line], tool.id)

    cat1_score_key = "#{cat1}-#{submission_id}"
    cat2_score_key = "#{cat2}-#{submission_id}"
    cat3_score_key = "#{cat3}-#{submission_id}"

    # 1 version entry + 3 leaderboards + 3 scores
    assert map_size(result) == 7

    assert count_leaderboards() == 3
    assert count_scores() == 3

    assert %{
             {:leaderboard, ^cat1} => %Systems.Benchmark.LeaderboardModel{
               name: ^cat1,
               tool_id: ^tool_id
             },
             {:leaderboard, ^cat2} => %Systems.Benchmark.LeaderboardModel{
               name: ^cat2,
               tool_id: ^tool_id
             },
             {:leaderboard, ^cat3} => %Systems.Benchmark.LeaderboardModel{
               name: ^cat3,
               tool_id: ^tool_id
             },
             {:score, ^cat1_score_key} => %Systems.Benchmark.ScoreModel{
               score: ^cat1_score,
               submission_id: ^submission_id
             },
             {:score, ^cat2_score_key} => %Systems.Benchmark.ScoreModel{
               score: ^cat2_score,
               submission_id: ^submission_id
             },
             {:score, ^cat3_score_key} => %Systems.Benchmark.ScoreModel{
               score: ^cat3_score,
               submission_id: ^submission_id
             }
           } = result
  end

  test "import_csv_lines/2 one line with score `0`" do
    %{id: tool_id} = tool = create_tool()
    %{name: name} = spot = create_spot(tool)
    %{id: submission_id, description: description} = create_submission(spot)

    cat1 = "aap"

    csv_line = %{
      "id" => "#{submission_id}:#{name}:#{description}",
      "status" => "success",
      "error_message" => "",
      cat1 => "0"
    }

    {:ok, result} = Benchmark.Public.import_csv_lines([csv_line], tool.id)

    cat1_score_key = "#{cat1}-#{submission_id}"

    assert map_size(result) == 3

    assert count_leaderboards() == 1
    assert count_scores() == 1

    assert %{
             {:leaderboard, ^cat1} => %Systems.Benchmark.LeaderboardModel{
               name: ^cat1,
               tool_id: ^tool_id
             },
             {:score, ^cat1_score_key} => %Systems.Benchmark.ScoreModel{
               score: 0.0,
               submission_id: ^submission_id
             }
           } = result
  end

  test "import_csv_lines/2 one line with empty score" do
    %{id: tool_id} = tool = create_tool()
    %{name: name} = spot = create_spot(tool)
    %{id: submission_id, description: description} = create_submission(spot)

    cat1 = "aap"

    csv_line = %{
      "id" => "#{submission_id}:#{name}:#{description}",
      "status" => "success",
      "error_message" => "",
      cat1 => ""
    }

    {:ok, result} = Benchmark.Public.import_csv_lines([csv_line], tool.id)

    cat1_score_key = "#{cat1}-#{submission_id}"

    assert map_size(result) == 3

    assert count_leaderboards() == 1
    assert count_scores() == 1

    assert %{
             {:leaderboard, ^cat1} => %Systems.Benchmark.LeaderboardModel{
               name: ^cat1,
               tool_id: ^tool_id
             },
             {:score, ^cat1_score_key} => %Systems.Benchmark.ScoreModel{
               score: 0.0,
               submission_id: ^submission_id
             }
           } = result
  end

  test "import_csv_lines/2 two lines two submissions" do
    %{id: tool_id} = tool = create_tool()
    %{name: name} = spot = create_spot(tool)
    %{id: submission_id1, description: description1} = create_submission(spot, "Method X")
    %{id: submission_id2, description: description2} = create_submission(spot, "Method Y")

    csv_line1 = %{
      "id" => "#{submission_id1}:#{name}:#{description1}",
      "status" => "success",
      "error_message" => "",
      "cat1" => "0.1"
    }

    csv_line2 = %{
      "id" => "#{submission_id2}:#{name}:#{description2}",
      "status" => "success",
      "error_message" => "",
      "cat1" => "0.2"
    }

    {:ok, result} = Benchmark.Public.import_csv_lines([csv_line1, csv_line2], tool_id)

    assert map_size(result) == 4

    assert count_leaderboards() == 1
    assert count_scores() == 2
  end

  test "import_csv_lines/2 two lines one submission should fail" do
    %{id: tool_id} = tool = create_tool()
    %{name: name} = spot = create_spot(tool)
    %{id: submission_id, description: description} = create_submission(spot, "Method X")

    csv_line1 = %{
      "id" => "#{submission_id}:#{name}:#{description}",
      "status" => "success",
      "error_message" => "",
      "cat1" => "0.1"
    }

    csv_line2 = %{
      "id" => "#{submission_id}:#{name}:#{description}",
      "status" => "success",
      "error_message" => "",
      "cat1" => "0.2"
    }

    assert_raise RuntimeError, fn ->
      Benchmark.Public.import_csv_lines([csv_line1, csv_line2], tool_id)
    end
  end

  test "import_csv_lines/2 ignore errors" do
    %{id: tool_id} = tool = create_tool()
    %{name: name} = spot = create_spot(tool)
    %{id: submission_id1, description: description1} = create_submission(spot, "Method X")
    %{id: submission_id2, description: description2} = create_submission(spot, "Method Y")

    csv_line1 = %{
      "id" => "#{submission_id1}:#{name}:#{description1}",
      "status" => "success",
      "error_message" => "",
      "cat1" => "0.1"
    }

    csv_line2 = %{
      "id" => "#{submission_id2}:#{name}:#{description2}",
      "status" => "error",
      "error_message" => "Something went wrong",
      "cat1" => ""
    }

    {:ok, result} = Benchmark.Public.import_csv_lines([csv_line1, csv_line2], tool_id)

    # Only the "success" line is imported: version + 1 leaderboard + 1 score.
    assert map_size(result) == 3
  end

  test "import_csv_lines/2 two versions after two imports" do
    tool = create_tool()
    %{name: name} = spot = create_spot(tool)
    %{id: submission_id, description: description} = create_submission(spot)

    cat1 = "aap"

    csv_line = %{
      "id" => "#{submission_id}:#{name}:#{description}",
      "status" => "success",
      "error_message" => "",
      cat1 => ""
    }

    {:ok, %{version: version1}} = Benchmark.Public.import_csv_lines([csv_line], tool.id)
    {:ok, %{version: version2}} = Benchmark.Public.import_csv_lines([csv_line], tool.id)

    assert version1 < version2

    # Each import creates a fresh versioned leaderboard + score.
    assert count_leaderboards() == 2
    assert count_scores() == 2
  end

  defp create_tool() do
    Factories.insert!(:benchmark_tool, %{status: :concept, director: :project})
  end

  defp create_spot(tool, name \\ "Team Eyra") do
    Factories.insert!(:benchmark_spot, %{tool: tool, name: name})
  end

  defp create_submission(spot, description \\ "Method X") do
    submission_attr = %{
      spot: spot,
      description: description,
      github_commit_url:
        "https://github.com/eyra/mono/commit/9d10bd2907dda135ebe86511489570dbf8c067c0"
    }

    Factories.insert!(:benchmark_submission, submission_attr)
  end

  defp count_scores() do
    Repo.one(
      Ecto.Query.from(
        score in Benchmark.ScoreModel,
        select: count(score.id)
      )
    )
  end

  defp count_leaderboards() do
    Repo.one(
      Ecto.Query.from(
        leaderboard in Benchmark.LeaderboardModel,
        select: count(leaderboard.id)
      )
    )
  end
end

0 comments on commit 4acc657

Please sign in to comment.