diff --git a/apps/transport/lib/db/dataset_score.ex b/apps/transport/lib/db/dataset_score.ex
index fafd0c0407..e9ed4fe530 100644
--- a/apps/transport/lib/db/dataset_score.ex
+++ b/apps/transport/lib/db/dataset_score.ex
@@ -9,7 +9,7 @@ defmodule DB.DatasetScore do
typed_schema "dataset_score" do
belongs_to(:dataset, DB.Dataset)
- field(:topic, Ecto.Enum, values: [:freshness, :availability])
+ field(:topic, Ecto.Enum, values: [:freshness, :availability, :compliance])
field(:score, :float)
field(:timestamp, :utc_datetime_usec)
field(:details, :map)
diff --git a/apps/transport/lib/jobs/dataset_quality_score.ex b/apps/transport/lib/jobs/dataset_quality_score.ex
index e9a6b94ed2..b4039a6494 100644
--- a/apps/transport/lib/jobs/dataset_quality_score.ex
+++ b/apps/transport/lib/jobs/dataset_quality_score.ex
@@ -28,9 +28,9 @@ defmodule Transport.Jobs.DatasetQualityScore do
@impl Oban.Worker
def perform(%Oban.Job{args: %{"dataset_id" => dataset_id}}) do
- Transport.Jobs.DatasetFreshnessScore.save_freshness_score(dataset_id)
- Transport.Jobs.DatasetAvailabilityScore.save_availability_score(dataset_id)
- :ok
+ DB.DatasetScore
+ |> Ecto.Enum.values(:topic)
+ |> Enum.each(fn topic -> save_dataset_score(dataset_id, topic) end)
end
@doc """
@@ -63,12 +63,23 @@ defmodule Transport.Jobs.DatasetQualityScore do
@doc """
Exponential smoothing. See https://en.wikipedia.org/wiki/Exponential_smoothing
- iex> exp_smoothing(0.5, 1)
+ iex> exp_smoothing(0.5, 1, :freshness)
+ 0.55
+ iex> exp_smoothing(0.5, 1, 0.9)
0.55
+ iex> exp_smoothing(0.5, 1, :compliance)
+ 0.525
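+
+ The :availability topic uses the same alpha (0.9) as :freshness:
+
+ iex> exp_smoothing(0.5, 1, :availability)
+ 0.55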
"""
- @spec exp_smoothing(float, float) :: float
- def exp_smoothing(previous_score, today_score) do
- alpha = 0.9
+ @spec exp_smoothing(float(), float(), atom() | float()) :: float()
+ def exp_smoothing(previous_score, today_score, :compliance) do
+ exp_smoothing(previous_score, today_score, 0.95)
+ end
+
+ def exp_smoothing(previous_score, today_score, topic) when topic in [:freshness, :availability] do
+ exp_smoothing(previous_score, today_score, 0.9)
+ end
+
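+ # The closer `alpha` is to 1, the more weight the previous score keeps:
+ # compliance (alpha 0.95) therefore evolves more slowly than freshness and availability (0.9).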
+ def exp_smoothing(previous_score, today_score, alpha) do
alpha * previous_score + (1.0 - alpha) * today_score
end
@@ -146,7 +157,7 @@ defmodule Transport.Jobs.DatasetQualityScore do
computed_score =
case last_score = last_dataset_score(dataset_id, topic) do
%{score: previous_score} when is_float(previous_score) ->
- exp_smoothing(previous_score, today_score)
+ exp_smoothing(previous_score, today_score, topic)
_ ->
today_score
@@ -164,21 +175,82 @@ defmodule Transport.Jobs.DatasetQualityScore do
Map.fetch!(
%{
availability: &Transport.Jobs.DatasetAvailabilityScore.current_dataset_availability/1,
- freshness: &Transport.Jobs.DatasetFreshnessScore.current_dataset_freshness/1
+ freshness: &Transport.Jobs.DatasetFreshnessScore.current_dataset_freshness/1,
+ compliance: &Transport.Jobs.DatasetComplianceScore.current_dataset_compliance/1
},
topic
)
end
end
-defmodule Transport.Jobs.DatasetAvailabilityScore do
+defmodule Transport.Jobs.DatasetComplianceScore do
@moduledoc """
- Methods specific to the availability component of a dataset score.
+ Methods specific to the compliance component of a dataset score.
+
+ Computes and saves a compliance score for a dataset.
+
+ To compute this score:
+ - get the dataset's current resources
+ - for each resource validated by one of the validators in `@validators`, give it
+ a score (1 if it is valid, 0 if it has an error)
+ - we compute an average of those scores to get a score at the dataset level
+ - that score is averaged with the dataset's last computed score, using exponential smoothing
+ (see the function `exp_smoothing/3`). This allows a score to reflect not only the current
+ dataset situation but also past situations.
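+
+ A hypothetical example of the returned shape, for a dataset with one valid GTFS
+ resource and one GBFS resource with errors (resource ids are made up):
+
+ current_dataset_compliance(dataset_id)
+ #=> %{
+ #     score: 0.5,
+ #     details: %{
+ #       resources: [
+ #         %{compliance: 1.0, resource_id: 1, raw_measure: %{"max_error" => "Warning"}},
+ #         %{compliance: 0.0, resource_id: 2, raw_measure: %{"has_errors" => true}}
+ #       ]
+ #     }
+ #   }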
"""
import Ecto.Query
- import Transport.Jobs.DatasetQualityScore
+ alias Transport.Jobs.DatasetQualityScore
+
+ @validators_with_has_errors [
+ Transport.Validators.TableSchema,
+ Transport.Validators.EXJSONSchema,
+ Transport.Validators.GBFSValidator
+ ]
+ @gtfs_validator Transport.Validators.GTFSTransport
+ @validators [@gtfs_validator | @validators_with_has_errors]
+ @validators_with_has_errors_names Enum.map(@validators_with_has_errors, & &1.validator_name())
+ @gtfs_validator_name @gtfs_validator.validator_name()
+
+ @spec current_dataset_compliance(integer()) :: %{score: float | nil, details: map()}
+ def current_dataset_compliance(dataset_id) do
+ validation_details =
+ dataset_id
+ |> DB.MultiValidation.dataset_latest_validation(@validators)
+ |> Enum.reject(fn {_resource_id, [multi_validation]} -> is_nil(multi_validation) end)
+
+ current_dataset_infos = Enum.map(validation_details, &resource_compliance/1)
+
+ score =
+ current_dataset_infos |> Enum.map(fn %{compliance: compliance} -> compliance end) |> DatasetQualityScore.average()
+
+ %{score: score, details: %{resources: current_dataset_infos}}
+ end
+
+ @spec resource_compliance({integer(), [DB.MultiValidation.t()]}) :: %{
+ :compliance => float(),
+ :resource_id => integer(),
+ :raw_measure => map()
+ }
+ # For TableSchema, JSON Schema and GBFS resources
+ def resource_compliance(
+ {resource_id, [%DB.MultiValidation{validator: validator, result: %{"has_errors" => has_errors} = result}]}
+ )
+ when validator in @validators_with_has_errors_names do
+ compliance = if has_errors, do: 0.0, else: 1.0
+ %{compliance: compliance, resource_id: resource_id, raw_measure: result}
+ end
+
+ # For GTFS resources
+ def resource_compliance({resource_id, [%DB.MultiValidation{validator: @gtfs_validator_name, max_error: max_error}]}) do
+ compliance = if max_error in ["Fatal", "Error"], do: 0.0, else: 1.0
+ %{compliance: compliance, resource_id: resource_id, raw_measure: %{"max_error" => max_error}}
+ end
+end
+
+defmodule Transport.Jobs.DatasetAvailabilityScore do
+ @moduledoc """
+ Methods specific to the availability component of a dataset score.
- @doc """
Saves and computes an availability score for a dataset.
To compute this score:
@@ -186,16 +258,15 @@ defmodule Transport.Jobs.DatasetAvailabilityScore do
- for each resource, give it a score based on its availability over the last 24 hours
- we compute an average of those scores to get a score at the dataset level
- that score is averaged with the dataset's last computed score, using exponential smoothing
- (see the function `exp_smoothing/1` below). This allows a score to reflect not only the current
+ (see the function `exp_smoothing/3`). This allows a score to reflect not only the current
dataset situation but also past situations.
If any resource has an availability score of 0 (under 95% availability over the last 24 hours),
the availability score of the dataset will be 0.
The rationale is that the entire dataset may be unusable if a single resource cannot be fetched.
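+
+ A hypothetical example: for a dataset with one resource at 100% availability and one
+ resource under the 95% threshold, the dataset score is 0.0 rather than the 0.5 average:
+
+ current_dataset_availability(dataset_id)
+ #=> %{score: 0.0, details: %{resources: [...]}}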
"""
- def save_availability_score(dataset_id) do
- save_dataset_score(dataset_id, :availability)
- end
+ import Ecto.Query
+ import Transport.Jobs.DatasetQualityScore
@spec current_dataset_availability(integer()) :: %{score: float | nil, details: map()}
def current_dataset_availability(dataset_id) do
@@ -279,11 +350,7 @@ end
defmodule Transport.Jobs.DatasetFreshnessScore do
@moduledoc """
Methods specific to the freshness component of a dataset score.
- """
- import Ecto.Query
- import Transport.Jobs.DatasetQualityScore
- @doc """
Dataset "freshness" is the answer to the question: "When the data was downloaded, was it up-to-date?"
To give a score, we proceed this way:
@@ -291,15 +358,14 @@ defmodule Transport.Jobs.DatasetFreshnessScore do
- for each resource, give it a score
- we compute an average of those scores to get a score at the dataset level
- that score is averaged with the dataset's last computed score, using exponential smoothing
- (see the function `exp_smoothing/1`). This allows a score to reflect not only the current
+ (see the function `exp_smoothing/3`). This allows a score to reflect not only the current
dataset situation but also past situations. Typically, a dataset that had outdated resources
for the past year, but only up-to-date resources today is expected to have a low freshness score.
The interest of exponential smoothing is to give past scores an increasingly small weight as time
passes. To have a good score, a dataset must have up-to-date resources every day.
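+
+ A hypothetical sketch of the returned shape (values are illustrative only):
+
+ current_dataset_freshness(dataset_id)
+ #=> %{score: 1.0, details: %{resources: [%{freshness: 1.0, resource_id: 1, ...}]}}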
"""
- def save_freshness_score(dataset_id) do
- save_dataset_score(dataset_id, :freshness)
- end
+ import Ecto.Query
+ import Transport.Jobs.DatasetQualityScore
@spec current_dataset_freshness(integer()) :: %{score: float | nil, details: map()}
def current_dataset_freshness(dataset_id) do
diff --git a/apps/transport/lib/transport_web/templates/dataset/_dataset_scores.html.heex b/apps/transport/lib/transport_web/templates/dataset/_dataset_scores.html.heex
index 054142c631..8f5bea40c6 100644
--- a/apps/transport/lib/transport_web/templates/dataset/_dataset_scores.html.heex
+++ b/apps/transport/lib/transport_web/templates/dataset/_dataset_scores.html.heex
@@ -1,25 +1,21 @@
- <% freshness_score = Map.get(@dataset_scores, :freshness) %>
-
- <%= unless is_nil(freshness_score) do %>
- Score fraicheur : <%= DB.DatasetScore.score_for_humans(freshness_score) %>
-
- <%= freshness_score.timestamp |> Shared.DateTimeDisplay.format_datetime_to_paris(@locale) %>
-
- <% else %>
- Pas de score fraicheur
- <% end %>
-
- <% availability_score = Map.get(@dataset_scores, :availability) %>
-
- <%= unless is_nil(availability_score) do %>
- Score de disponibilité : <%= DB.DatasetScore.score_for_humans(availability_score) %>
-
- <%= availability_score.timestamp |> Shared.DateTimeDisplay.format_datetime_to_paris(@locale) %>
-
- <% else %>
- Pas de score de disponibilité
- <% end %>
-
+ <% components = [
+ {:freshness, "fraicheur"},
+ {:availability, "disponibilité"},
+ {:compliance, "conformité"}
+ ] %>
+ <%= for {topic, description} <- components do %>
+
+ <% score = Map.get(@dataset_scores, topic) %>
+ <%= unless is_nil(score) do %>
+ Score de <%= description %> : <%= DB.DatasetScore.score_for_humans(score) %>
+
+ <%= score.timestamp |> Shared.DateTimeDisplay.format_datetime_to_paris(@locale) %>
+
+ <% else %>
+ Pas de score de <%= description %>
+ <% end %>
+
+ <% end %>
Voir plus
diff --git a/apps/transport/test/transport/jobs/dataset_quality_score_test.exs b/apps/transport/test/transport/jobs/dataset_quality_score_test.exs
index 5788245af0..21ed89dbee 100644
--- a/apps/transport/test/transport/jobs/dataset_quality_score_test.exs
+++ b/apps/transport/test/transport/jobs/dataset_quality_score_test.exs
@@ -2,7 +2,8 @@ defmodule Transport.Test.Transport.Jobs.DatasetQualityScoreTest do
use ExUnit.Case, async: true
use Oban.Testing, repo: DB.Repo
import DB.Factory
- import Transport.Jobs.{DatasetAvailabilityScore, DatasetFreshnessScore, DatasetQualityScore}
+
+ import Transport.Jobs.{DatasetAvailabilityScore, DatasetComplianceScore, DatasetFreshnessScore, DatasetQualityScore}
doctest Transport.Jobs.DatasetQualityScore, import: true
@@ -314,6 +315,89 @@ defmodule Transport.Test.Transport.Jobs.DatasetQualityScoreTest do
end
end
+ describe "current dataset compliance" do
+ test "GTFS-RT and documentation resources are ignored, GeoJSON with schema is used" do
+ dataset = insert(:dataset, slug: Ecto.UUID.generate(), is_active: true)
+
+ geojson_resource =
+ insert(:resource, dataset: dataset, format: "geojson", schema_name: "etalab/#{Ecto.UUID.generate()}")
+
+ rh_geojson_resource = insert(:resource_history, resource: geojson_resource)
+
+ # A documentation resource with a ResourceHistory but no validation is ignored
+ insert(:resource_history, resource: insert(:resource, dataset: dataset, format: "pdf", type: "documentation"))
+
+ # Should be ignored: we don't use the GTFS-RT validator
+ insert(:multi_validation, %{
+ validator: Transport.Validators.GTFSRT.validator_name(),
+ resource: insert(:resource, dataset: dataset, format: "gtfs-rt"),
+ max_error: "ERROR"
+ })
+
+ insert(:multi_validation, %{
+ resource_history: rh_geojson_resource,
+ validator: Transport.Validators.EXJSONSchema.validator_name(),
+ result: %{"has_errors" => true},
+ inserted_at: DateTime.utc_now() |> DateTime.add(-45, :minute)
+ })
+
+ assert %{
+ score: 0.0,
+ details: %{
+ resources: [%{compliance: 0.0, raw_measure: %{"has_errors" => true}, resource_id: geojson_resource.id}]
+ }
+ } == current_dataset_compliance(dataset.id)
+ end
+
+ test "with 2 GTFS: a Fatal and a Warning" do
+ dataset = insert(:dataset, slug: Ecto.UUID.generate(), is_active: true)
+
+ insert(:multi_validation, %{
+ resource_history:
+ insert(:resource_history, resource: gtfs_1 = insert(:resource, dataset: dataset, format: "GTFS")),
+ validator: Transport.Validators.GTFSTransport.validator_name(),
+ max_error: "Error"
+ })
+
+ insert(:multi_validation, %{
+ resource_history:
+ insert(:resource_history, resource: gtfs_2 = insert(:resource, dataset: dataset, format: "GTFS")),
+ validator: Transport.Validators.GTFSTransport.validator_name(),
+ max_error: "Warning"
+ })
+
+ assert %{
+ score: 0.5,
+ details: %{
+ resources: [
+ %{compliance: 0.0, raw_measure: %{"max_error" => "Error"}, resource_id: gtfs_1.id},
+ %{compliance: 1.0, raw_measure: %{"max_error" => "Warning"}, resource_id: gtfs_2.id}
+ ]
+ }
+ } == current_dataset_compliance(dataset.id)
+ end
+
+ test "with a single GTFS with a Warning" do
+ dataset = insert(:dataset, slug: Ecto.UUID.generate(), is_active: true)
+
+ insert(:multi_validation, %{
+ resource_history:
+ insert(:resource_history, resource: gtfs = insert(:resource, dataset: dataset, format: "GTFS")),
+ validator: Transport.Validators.GTFSTransport.validator_name(),
+ max_error: "Warning"
+ })
+
+ assert %{
+ score: 1.0,
+ details: %{
+ resources: [
+ %{compliance: 1.0, raw_measure: %{"max_error" => "Warning"}, resource_id: gtfs.id}
+ ]
+ }
+ } == current_dataset_compliance(dataset.id)
+ end
+ end
+
describe "last_dataset_score" do
test "fetches the latest non-nil score for yesterday" do
dataset = insert(:dataset, is_active: true)
@@ -324,27 +408,32 @@ defmodule Transport.Test.Transport.Jobs.DatasetQualityScoreTest do
insert(:dataset_score, dataset: dataset, topic: :availability, score: 0.75, timestamp: yesterday_after)
insert(:dataset_score, dataset: dataset, topic: :freshness, score: 1.0, timestamp: yesterday)
insert(:dataset_score, dataset: dataset, topic: :freshness, score: nil, timestamp: yesterday_after)
+ insert(:dataset_score, dataset: dataset, topic: :compliance, score: 0.8, timestamp: yesterday)
+ insert(:dataset_score, dataset: dataset, topic: :compliance, score: nil, timestamp: yesterday_after)
assert %DB.DatasetScore{score: 0.75, topic: :availability} = last_dataset_score(dataset.id, :availability)
assert %DB.DatasetScore{score: 1.0, topic: :freshness} = last_dataset_score(dataset.id, :freshness)
+ assert %DB.DatasetScore{score: 0.8, topic: :compliance} = last_dataset_score(dataset.id, :compliance)
end
test "does not use scores if they are more than 7-day old" do
dataset = insert(:dataset, is_active: true)
+ topics = Ecto.Enum.values(DB.DatasetScore, :topic)
insert(:dataset_score,
dataset: dataset,
- topic: :availability,
+ topic: Enum.random(topics),
score: 0.5,
timestamp: DateTime.utc_now() |> DateTime.add(-8, :day)
)
- assert is_nil(last_dataset_score(dataset.id, :availability))
- assert is_nil(last_dataset_score(dataset.id, :freshness))
+ Enum.each(topics, fn topic ->
+ assert is_nil(last_dataset_score(dataset.id, topic))
+ end)
end
end
- describe "save_availability_score" do
+ describe "save_dataset_score for availability" do
test "computes availability from yesterday and today" do
dataset = insert(:dataset, is_active: true)
r1 = insert(:resource, dataset: dataset, is_community_resource: false)
@@ -368,11 +457,11 @@ defmodule Transport.Test.Transport.Jobs.DatasetQualityScoreTest do
assert DB.DatasetScore |> DB.Repo.all() |> length() == 2
# expected score is 0.5 * 0.9 + 1. * (1. - 0.9) = 0.55
- # see exp_smoothing() function
+ # see exp_smoothing/3 function
assert {
:ok,
%DB.DatasetScore{id: _id, topic: :availability, score: 0.55, timestamp: timestamp, details: details}
- } = save_availability_score(dataset.id)
+ } = save_dataset_score(dataset.id, :availability)
assert DateTime.diff(timestamp, DateTime.utc_now(), :second) < 3
assert DB.DatasetScore |> DB.Repo.all() |> length() == 3
@@ -390,7 +479,59 @@ defmodule Transport.Test.Transport.Jobs.DatasetQualityScoreTest do
end
end
- describe "save dataset average freshness" do
+ describe "save_dataset_score for compliance" do
+ test "computes compliance from yesterday and today" do
+ dataset = insert(:dataset, slug: Ecto.UUID.generate(), is_active: true)
+
+ insert(:multi_validation, %{
+ resource_history:
+ insert(:resource_history,
+ resource: %DB.Resource{id: gtfs_id} = insert(:resource, dataset: dataset, format: "GTFS")
+ ),
+ validator: Transport.Validators.GTFSTransport.validator_name(),
+ max_error: "Warning"
+ })
+
+ # we save a compliance score for yesterday
+ insert(:dataset_score,
+ dataset_id: dataset.id,
+ topic: :compliance,
+ score: 0.5,
+ timestamp: DateTime.utc_now() |> DateTime.add(-1, :day)
+ )
+
+ # a score for another topic
+ insert(:dataset_score,
+ dataset_id: dataset.id,
+ topic: :freshness,
+ score: 1.0,
+ timestamp: DateTime.utc_now() |> DateTime.add(-1, :day)
+ )
+
+ assert DB.DatasetScore |> DB.Repo.all() |> length() == 2
+
+ # expected score is 0.5 * 0.95 + 1. * (1. - 0.95) = 0.525
+ # see exp_smoothing/3 function
+ assert {
+ :ok,
+ %DB.DatasetScore{
+ id: _id,
+ topic: :compliance,
+ score: 0.525,
+ timestamp: timestamp,
+ details: %{
+ previous_score: 0.5,
+ resources: [%{compliance: 1.0, raw_measure: %{"max_error" => "Warning"}, resource_id: ^gtfs_id}]
+ }
+ }
+ } = save_dataset_score(dataset.id, :compliance)
+
+ assert DateTime.diff(timestamp, DateTime.utc_now(), :second) < 3
+ assert DB.DatasetScore |> DB.Repo.all() |> length() == 3
+ end
+ end
+
+ describe "save_dataset_score for freshness" do
test "computes freshness from yesterday and today" do
%{dataset: dataset, resource: %{id: resource_id}, resource_metadata: %{id: metadata_id}} =
insert_up_to_date_resource_and_friends()
@@ -413,10 +554,10 @@ defmodule Transport.Test.Transport.Jobs.DatasetQualityScoreTest do
assert DB.DatasetScore |> DB.Repo.all() |> length() == 2
- {:ok, score} = save_freshness_score(dataset.id)
+ {:ok, score} = save_dataset_score(dataset.id, :freshness)
# expected score is 0.5 * 0.9 + 1. * (1. - 0.9)
- # see exp_smoothing() function
+ # see exp_smoothing/3 function
assert %{id: _id, topic: :freshness, score: 0.55, timestamp: timestamp, details: details} = score
assert DateTime.diff(timestamp, DateTime.utc_now()) < 3
@@ -440,6 +581,7 @@ defmodule Transport.Test.Transport.Jobs.DatasetQualityScoreTest do
test "no score yesterday" do
%{dataset: dataset} = insert_up_to_date_resource_and_friends()
+
# an irrelevant score
insert(:dataset_score,
dataset_id: dataset.id,
@@ -450,7 +592,7 @@ defmodule Transport.Test.Transport.Jobs.DatasetQualityScoreTest do
assert DB.DatasetScore |> DB.Repo.all() |> length() == 1
- {:ok, score} = save_freshness_score(dataset.id)
+ {:ok, score} = save_dataset_score(dataset.id, :freshness)
# expected score is today's score (no existing history)
assert %{id: _id, topic: :freshness, score: 1.0, timestamp: timestamp} = score
@@ -463,7 +605,7 @@ defmodule Transport.Test.Transport.Jobs.DatasetQualityScoreTest do
dataset = insert(:dataset)
assert DB.DatasetScore |> DB.Repo.all() |> length() == 0
- {:ok, score} = save_freshness_score(dataset.id)
+ {:ok, score} = save_dataset_score(dataset.id, :freshness)
# expected score is nil
assert %{id: _id, topic: :freshness, score: nil, timestamp: timestamp} = score
@@ -492,7 +634,7 @@ defmodule Transport.Test.Transport.Jobs.DatasetQualityScoreTest do
assert DB.DatasetScore |> DB.Repo.all() |> length() == 2
- {:ok, score} = save_freshness_score(dataset.id)
+ {:ok, score} = save_dataset_score(dataset.id, :freshness)
# score is computed with today's freshness and the last non-nil score.
assert %{id: _id, topic: :freshness, score: 0.55, timestamp: timestamp} = score
@@ -513,7 +655,7 @@ defmodule Transport.Test.Transport.Jobs.DatasetQualityScoreTest do
assert DB.DatasetScore |> DB.Repo.all() |> length() == 1
- {:ok, score} = save_freshness_score(dataset.id)
+ {:ok, score} = save_dataset_score(dataset.id, :freshness)
# score is computed from scratch, previous score is not used
assert %{id: _id, topic: :freshness, score: 1.0, timestamp: timestamp} = score
@@ -535,13 +677,13 @@ defmodule Transport.Test.Transport.Jobs.DatasetQualityScoreTest do
timestamp: DateTime.utc_now() |> DateTime.add(-1, :day)
)
- {:ok, score} = save_freshness_score(dataset.id)
+ {:ok, score} = save_dataset_score(dataset.id, :freshness)
# score is computed with yesterday's score
assert %{id: id1, topic: :freshness, score: 0.55, timestamp: _timestamp} = score
# we force refresh the score computation
# it should use yesterday's score again
- {:ok, score} = save_freshness_score(dataset.id)
+ {:ok, score} = save_dataset_score(dataset.id, :freshness)
assert %{id: id2, topic: :freshness, score: 0.55, timestamp: _timestamp} = score
assert id2 > id1
end
@@ -550,7 +692,7 @@ defmodule Transport.Test.Transport.Jobs.DatasetQualityScoreTest do
dataset = insert(:dataset, is_active: true)
%{id: resource_id} = insert(:resource, dataset_id: dataset.id, format: "csv", is_community_resource: false)
- {:ok, score} = save_freshness_score(dataset.id)
+ {:ok, score} = save_dataset_score(dataset.id, :freshness)
assert %{
topic: :freshness,
@@ -575,7 +717,10 @@ defmodule Transport.Test.Transport.Jobs.DatasetQualityScoreTest do
describe "DatasetQualityScore" do
test "job saves multiple topics for a dataset" do
assert DB.DatasetScore |> DB.Repo.all() |> Enum.empty?()
- %{dataset: %DB.Dataset{id: dataset_id} = dataset} = insert_up_to_date_resource_and_friends()
+
+ %{dataset: %DB.Dataset{id: dataset_id} = dataset, resource: %DB.Resource{id: resource_id}} =
+ insert_up_to_date_resource_and_friends()
+
assert :ok == perform_job(Transport.Jobs.DatasetQualityScore, %{"dataset_id" => dataset.id})
assert [
@@ -590,7 +735,7 @@ defmodule Transport.Test.Transport.Jobs.DatasetQualityScoreTest do
"format" => "GTFS",
"freshness" => 1.0,
"raw_measure" => %{"end_date" => _, "start_date" => _},
- "resource_id" => _
+ "resource_id" => ^resource_id
}
]
}
@@ -601,7 +746,18 @@ defmodule Transport.Test.Transport.Jobs.DatasetQualityScoreTest do
score: 1.0,
details: %{
"previous_score" => nil,
- "resources" => [%{"availability" => 1.0, "raw_measure" => nil, "resource_id" => _}]
+ "resources" => [%{"availability" => 1.0, "raw_measure" => nil, "resource_id" => ^resource_id}]
+ }
+ },
+ %DB.DatasetScore{
+ dataset_id: ^dataset_id,
+ topic: :compliance,
+ score: 1.0,
+ details: %{
+ "previous_score" => nil,
+ "resources" => [
+ %{"compliance" => 1.0, "raw_measure" => %{"max_error" => nil}, "resource_id" => ^resource_id}
+ ]
}
}
] = DB.DatasetScore |> DB.Repo.all()
diff --git a/apps/transport/test/transport_web/controllers/dataset_controller_test.exs b/apps/transport/test/transport_web/controllers/dataset_controller_test.exs
index c9afe3975a..346ba63930 100644
--- a/apps/transport/test/transport_web/controllers/dataset_controller_test.exs
+++ b/apps/transport/test/transport_web/controllers/dataset_controller_test.exs
@@ -381,6 +381,13 @@ defmodule TransportWeb.DatasetControllerTest do
topic: :freshness
)
+ insert(:dataset_score,
+ dataset: dataset,
+ timestamp: DateTime.utc_now() |> DateTime.add(-3, :hour),
+ score: 0.8,
+ topic: :compliance
+ )
+
insert(:dataset_score,
dataset: dataset,
timestamp: DateTime.utc_now() |> DateTime.add(-1, :hour),
@@ -390,7 +397,8 @@ defmodule TransportWeb.DatasetControllerTest do
assert %{
availability: %DB.DatasetScore{topic: :availability, score: nil},
- freshness: %DB.DatasetScore{topic: :freshness, score: 0.549}
+ freshness: %DB.DatasetScore{topic: :freshness, score: 0.549},
+ compliance: %DB.DatasetScore{topic: :compliance, score: 0.8}
} = dataset |> DB.DatasetScore.get_latest_scores(Ecto.Enum.values(DB.DatasetScore, :topic))
set_empty_mocks()
@@ -401,7 +409,8 @@ defmodule TransportWeb.DatasetControllerTest do
|> get(dataset_path(conn, :details, dataset.slug))
|> html_response(200)
- assert content =~ "Score fraicheur : 0.55"
+ assert content =~ "Score de fraicheur : 0.55"
+ assert content =~ "Score de conformité : 0.8"
assert content =~ "Score de disponibilité : \n"
end
@@ -418,7 +427,7 @@ defmodule TransportWeb.DatasetControllerTest do
refute dataset |> DB.DatasetScore.get_latest_scores([:freshness]) |> Enum.empty?()
set_empty_mocks()
content = conn |> get(dataset_path(conn, :details, dataset.slug)) |> html_response(200)
- refute content =~ "Score fraicheur"
+ refute content =~ "Score de fraicheur"
end
describe "scores_chart" do