From 5ca7a55471e74dc0d6ed7c4a028bb93d50a83158 Mon Sep 17 00:00:00 2001
From: <>
Date: Thu, 8 Aug 2024 13:02:35 +0000
Subject: [PATCH] Deployed 2942e85 with MkDocs version: 1.6.0

 195 files changed, 186498 insertions(+)

diff --git a/404.html b/404.html
404 - Not found

diff --git a/Developer-Reference/Developer Reference/index.html b/Developer-Reference/Developer Reference/index.html

Developer Reference

Who is this for:

  • Tech Leads
  • Developers
diff --git a/Developer-Reference/Infrastructure/ADP Portal/index.html b/Developer-Reference/Infrastructure/ADP Portal/index.html

Azure Developer Portal

Welcome to the Azure Developer Portal (ADP) repository. The portal is built using Backstage.

Getting started

Prerequisites

  • Access to a UNIX-based operating system. If on Windows, it is recommended that you use WSL
  • A GNU-like build environment available at the command line. For example, on Debian/Ubuntu you will want to have the make and build-essential packages installed
  • curl or wget installed
  • Node.js Active LTS release
  • Yarn
  • Docker
  • Git

See the Backstage Getting Started documentation for the full list of prerequisites.

Integrations

The portal is integrated with various third-party services. Connections to these services are managed through the environment variables below:

  • GitHub (via a GitHub App)
  • Entra ID/Azure/ADO/Microsoft Graph (via an App Registration). ADO also uses a PAT token in some (very limited) scenarios.
  • Azure Managed Grafana
  • Azure Blob Storage (for TechDocs)
  • AKS

DevContainers

Development can be done within a devcontainer if you wish. Once the devcontainer is set up, simply fill out the .env file at the root and rebuild the container. Once rebuilt, you will need to log the Azure CLI into the tenant you wish to connect to using az login --tenant <TenantId>.

If you are using VS Code, the steps are as follows:

  1. Install the Dev Containers extension
  2. Open the command palette and run the Dev Containers: Clone Repository in Container Volume command
  3. Either select the GitHub option and locate the repo, or enter https://github.com/DEFRA/adp-portal.git
  4. Once the dev container is ready, open the .env file at the root and fill it out with the variables described below
  5. Open the command palette and run the Dev Containers: Rebuild Container command
  6. Once the dev container is rebuilt, run az login --tenant <YOUR_TENANT_ID>
  7. To start the application, run yarn dev

To sign commits using GPG from within the devcontainer, please follow the steps here.

Environment Variables

The application requires the following environment variables to be set. We recommend creating a .env file in the root of your repo (this is ignored by Git) and pasting the variables into this file. This file will be used whenever you run a script through yarn, such as yarn dev.

APP_BASE_URL=http://localhost:3000
APP_BACKEND_BASE_URL=http://localhost:7007

GITHUB_APP_ID=""
GITHUB_CLIENT_ID=""
GITHUB_CLIENT_SECRET=""
GITHUB_PRIVATE_KEY=""
GITHUB_ORGANIZATION=""

AUTH_MICROSOFT_CLIENT_ID=""
AUTH_MICROSOFT_CLIENT_SECRET=""
AUTH_MICROSOFT_TENANT_ID=""

BACKSTAGE_BACKEND_SECRET=""

ADO_PAT=""
ADO_ORGANIZATION=""

GRAFANA_TOKEN=""
GRAFANA_ENDPOINT=""

TECHDOCS_AZURE_BLOB_STORAGE_ACCOUNT_NAME=""
TECHDOCS_AZURE_BLOB_STORAGE_ACCOUNT_KEY=""

ADP_PORTAL_PLATFORM_ADMINS_GROUP=""
ADP_PORTAL_PROGRAMME_ADMINS_GROUP=""
ADP_PORTAL_USERS_GROUP=""
ADP_PORTAL_USERS_GROUP_PREFIX=""

SND1_CLUSTER_NAME=""
SND1_CLUSTER_API_SERVER_ADDRESS=""
SND2_CLUSTER_NAME=""
SND2_CLUSTER_API_SERVER_ADDRESS=""
SND3_CLUSTER_NAME=""
SND3_CLUSTER_API_SERVER_ADDRESS=""

TZ=utc

To convert a GitHub private key into a format that can be used in the GITHUB_PRIVATE_KEY environment variable, use one of the following scripts:

PowerShell

$rsaprivkey = (Get-Content "private-key.pem" | Out-String) -replace "`r`n", "\n"

Shell

awk 'NF {sub(/\r/, ""); printf "%s\\n",$0;}' private-key.pem > rsaprivkey.txt

Techdocs

A hybrid strategy is implemented for TechDocs: documentation can be generated on the fly by the out-of-the-box generator or by an external pipeline. All generated documentation is stored in Azure Blob Storage.

For more information, please refer to: Ref

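A minimal sketch of how this hybrid setup might be expressed in app-config.yaml, using the standard Backstage techdocs configuration keys; the container name and values shown are illustrative assumptions, not the portal's actual configuration:

techdocs:
  builder: 'local'                 # generate docs on the fly; an external pipeline can also publish
  publisher:
    type: 'azureBlobStorage'
    azureBlobStorage:
      containerName: 'techdocs'    # assumed container name
      credentials:
        accountName: ${TECHDOCS_AZURE_BLOB_STORAGE_ACCOUNT_NAME}
        accountKey: ${TECHDOCS_AZURE_BLOB_STORAGE_ACCOUNT_KEY}
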

Running locally

Run the following commands from the root of the repository:

yarn install
yarn dev

Configuration

If you want to override any settings in ./app-config.yaml, create a local configuration file named app-config.local.yaml and define your overrides there.

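As a hedged sketch of what such an override file might contain (the keys are standard Backstage configuration; the values here are purely illustrative):

app:
  baseUrl: http://localhost:3000
backend:
  baseUrl: http://localhost:7007
  database:
    client: better-sqlite3       # e.g. swap the database driver locally
    connection: ':memory:'

Settings defined in app-config.local.yaml are merged over app-config.yaml, so you only need to list the keys you want to change.
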

Mac

You need to have the Azure CLI (az) and the Azure Developer CLI (azd) installed.

Log in to both az and azd before running the server.

az login --tenant XXXXX.azure.com
azd auth login --tenant-id <your tenant id>

You must run the application in the same browser session that the authentication ran in. If you use a "private window" (a new session), it will not have access to the cookies required to complete authentication, and you will get a 'user not found' error message.

Feature Requests

If you have an idea for a new feature or an improvement to an existing feature, please follow these steps:

  1. Check if the feature has already been requested by searching the project's issue tracker.
  2. If the feature hasn't been requested, create a new issue and provide a clear description of the proposed feature and why it would be beneficial.

Pull Requests

If you're ready to submit your code changes, please follow the steps specified in the pull_request_template.

Code Style Guidelines

To maintain a consistent code style throughout the project, please adhere to the following guidelines:

  1. Use descriptive variable and function names.
  2. Follow the existing code formatting and indentation style.
  3. Write clear and concise comments to explain complex code logic.

License

Include information about the project's license and any relevant copyright notices.
diff --git a/Developer-Reference/Infrastructure/ASO Helm Library Chart/index.html b/Developer-Reference/Infrastructure/ASO Helm Library Chart/index.html

ADP Platform Azure Service Operator Helm Library Chart

A Helm library chart that captures general configuration for Azure Service Operator (ASO) resources. It can be used by any microservice Helm chart to import Azure Service Operator K8s object templates configured to run on the ADP platform.

Including the library chart

In your microservice Helm chart:

  • Update Chart.yaml to apiVersion: v2.
  • Add the library chart under dependencies and choose the version you want (example below). The version number can include ~ or ^ to pick up the latest PATCH and MINOR versions respectively.
  • Issue the following commands to add the repo that contains the library chart, update the repo, then update the dependencies in your Helm chart:

helm repo add adp https://raw.githubusercontent.com/defra/adp-helm-repository/main/adp-aso-helm-library
helm repo update
helm dependency update <helm_chart_location>

An example Demo microservice Chart.yaml:

apiVersion: v2
description: A Helm chart to deploy a microservice to the ADP Kubernetes platform
name: demo-microservice
version: 1.0.0
dependencies:
- name: adp-aso-helm-library
  version: ^1.0.0
  repository: https://raw.githubusercontent.com/defra/adp-helm-repository/main/adp-aso-helm-library

NOTE: We will use ACR to publish the ASO Helm Library Chart, so the dependency above will be changed to import the library from ACR (in progress); a hedged sketch of what that might look like follows.

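Helm supports OCI registries such as ACR directly in Chart.yaml dependencies, so the eventual change might look like this (the registry name and repository path are hypothetical):

dependencies:
- name: adp-aso-helm-library
  version: ^1.0.0
  repository: oci://<acr-name>.azurecr.io/helm    # hypothetical ACR repository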

Using the K8s object templates

First, follow the instructions for including the ASO Helm library chart.

The ASO Helm library chart has been configured using the conventions described in the Helm library chart documentation. The K8s object templates provide settings shared by all objects of that type, which can be augmented with extra settings from the parent (Demo microservice) chart. The library object templates will merge the library and parent templates. Where settings are defined in both the library and parent chart, the parent chart settings take precedence, so library chart settings can be overridden (see the sketch below). The library object templates expect values to be set in the parent values.yaml. Any required values (defined for each template below) that are not provided will result in an error message when processing the template (helm install, helm upgrade, helm template).

The general strategy for using one of the library templates in the parent microservice Helm chart is to create a template for the K8s object formatted as so:

{{- include "adp-aso-helm-library.namespace-queue" . -}}

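As a sketch of the override behaviour described above (the queue name is illustrative; the default comes from the Optional values section further down): if the library template defaults lockDuration to PT1M, the parent chart's values.yaml only needs to set that one field to override it:

namespaceQueues:
  - name: claim
    lockDuration: PT5M    # overrides the library default of PT1M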

All template required values

All the K8s object templates in the library require the following values to be set in the parent microservice Helm chart's values.yaml:

namespace: <string>

Environment specific default values

The below values are used by the ASO templates internally, and their values are set using platform variables in the adp-flux-services repository.

For example, namespace queues will be created inside the serviceBusNamespaceName namespace, and Postgres databases will be created inside the postgresServerName server.

Whilst the Platform orchestration will manage the 'platform' level variables, they can optionally be supplied in some circumstances, for example in sandpit/development when testing against team-specific infrastructure (that isn't Platform shared). So, if you have a dedicated Service Bus or Database Server instance, you can point to those to ensure your apps work as expected. Otherwise, don't supply the Platform-level variables, as these will be automatically managed and orchestrated throughout all the environments appropriately against core shared infrastructure. You (as a Platform Tenant) just supply your team-specific/instance-specific infrastructure config (i.e. Queues, Storage Accounts, Databases), as sketched after the listing below.

namespace: <string>                                     --namespace name
environment: <string>                                   --environment name
fluxConfigNamespace: <string>                           --fluxConfig namespace name
subscriptionId: <string>                                --subscription Id
serviceBusResourceGroupName: <string>                   --Name of the service bus resource group
serviceBusNamespaceName: <string>                       --Name of the environment specific service bus
postgresResourceGroupName: <string>                     --Name of the Postgres server resource group
postgresServerName: <string>                            --Name of the environment specific postgres server
keyVaultResourceGroupName: <string>                     --Name of the keyvault resource group
keyVaultName: <string>                                  --Name of the environment specific keyVaultName
teamMIPrefix: <string>                                  --The prefix used for the ManagedIdentity/UserAssignedIdentity resource name
serviceName: <string>                                   --Service name. Suffix used for the ManagedIdentity/UserAssignedIdentity resource name
teamResourceGroupName: <string>                         --Team resource group name where team resources are created
virtualNetworkResourceGroupName: <string>               --Virtual network resource group
virtualNetworkName: <string>                            --Virtual network name
storageAccountPrefix: <string>                          --The prefix used for the storage account resource name
privateEndpointSubnetName: <string>                     --The name of the subnet for the service's private endpoint
privateEndpointPrefix: <string>                         --The prefix used for the private endpoint resource name
azrMSTPrivateLinkDNSUKSouthResourceGroupName: <string>  --NOT USED. We need to discuss this further
azrMSTPrivateLinkDNSUKWestResourceGroupName: <string>   --NOT USED. We need to discuss this further
azrMSTPrivateLinkDNSSubscriptionID: <string>            --NOT USED. We need to discuss this further

commonTags:
  environment: <string>
  serviceCode: <string>
  serviceName: <string>
  serviceType: <string> (Shared or Dedicated)
  kubernetes_cluster: <string>
  kubernetes_namespace: <string>
  kubernetes_label_serviceCode: <string>

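For instance, as a hedged sketch of the sandpit scenario described above (resource names invented for illustration), a team testing against a dedicated Service Bus and Postgres server would supply only those platform values and leave the rest to the orchestration:

serviceBusNamespaceName: snd-myteam-servicebus    # illustrative dedicated instance
postgresServerName: snd-myteam-postgres           # illustrative dedicated instance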

NameSpace Queue

  • Template file: _namespace-queue.yaml
  • Template name: adp-aso-helm-library.namespace-queue

An ASO NamespacesQueue object to create a Microsoft.ServiceBus/namespaces/queues resource.

A basic usage of this object template would involve the creation of templates/namespace-queue.yaml in the parent Helm chart (e.g. adp-microservice) containing:

{{- include "adp-aso-helm-library.namespace-queue" . -}}

Required values

The following values need to be set in the parent chart's values.yaml, in addition to the globally required values listed above.

Note that namespaceQueues is an array of objects that can be used to create more than one queue.

Please note that the queue name is prefixed with the namespace internally. For example, if the namespace name is "adp-demo" and you have provided the queue name "queue1", then in the Service Bus it creates a queue with the name adp-demo-queue1.

namespaceQueues:
  - name: <string>
  - name: <string>

Optional values

The following values can optionally be set in the parent chart's values.yaml to set the other properties for Service Bus queues.

The owner property is used to control the ownership of the queue. The default value is yes, and you don't need to provide it if you are creating and owning the queue. If you are only creating role assignments for a queue you do not own, then you should explicitly set the owner flag to no so that only the role assignments are created on the existing queue.

namespaceQueues:
  - name: <string>
    owner: <string>                                    --Default yes    (Accepted values = yes, no)
    deadLetteringOnMessageExpiration: <bool>           --Default false
    defaultMessageTimeToLive: <string>                 --Default P14D
    duplicateDetectionHistoryTimeWindow: <string>      --Default PT10M
    enableBatchedOperations: <bool>                    --Default true
    enableExpress: <bool>                              --Default false
    enablePartitioning: <bool>                         --Default false
    lockDuration: <string>                             --Default PT1M
    maxDeliveryCount: <int>                            --Default 10
    maxMessageSizeInKilobytes: <int>                   --Default 1024
    maxSizeInMegabytes: <int>                          --Default 1024
    requiresDuplicateDetection: <bool>                 --Default false
    requiresSession: <bool>                            --Default false
    roleAssignments:
      - roleName: <string>                             --RoleName, e.g. QueueSender

NameSpace Queue: RoleAssignments

This template also optionally allows you to create role assignments by providing roleAssignments properties in the namespaceQueues object.

Below are the minimum values that are required to be set in the parent chart's values.yaml to create roleAssignments.

namespaceQueues:
  - name: <string>
    roleAssignments:                                    <Array of Object>
      - roleName: <string>                              --RoleName, e.g. QueueSender (Allowed values = 'QueueSender', 'QueueReceiver')
      - roleName: <string>

If you are only creating role assignments for a queue you do not own, then you should explicitly set the owner flag to no so that only the role assignments are created on the existing queue.

Usage examples

The following section provides usage examples for the NameSpace Queue template.

Example 1: ServiceA in TeamA creates the claim queue with 2 role assignments

namespaceQueues:
  - name: claim
    roleAssignments:
      - roleName: QueueSender
      - roleName: QueueReceiver

Example 2: ServiceB in TeamA needs to receive messages from the existing claim queue. Note that owner is set to no.

namespaceQueues:
  - name: claim
    owner: 'no'
    roleAssignments:
      - roleName: QueueReceiver

NameSpace Topic

  • Template file: _namespace-topic.yaml
  • Template name: adp-aso-helm-library.namespace-topic

An ASO NamespacesTopic object to create a Microsoft.ServiceBus/namespaces/topics resource.

A basic usage of this object template would involve the creation of templates/namespace-topic.yaml in the parent Helm chart (e.g. adp-microservice) containing:

{{- include "adp-aso-helm-library.namespace-topic" . -}}

Required values

The following values need to be set in the parent chart's values.yaml, in addition to the globally required values listed above.

Note that namespaceTopics is an array of objects that can be used to create more than one topic.

Please note that the topic name is prefixed with the namespace internally. For example, if the namespace name is "adp-demo" and you have provided the topic name "topic1", then in the Service Bus it creates a topic with the name adp-demo-topic1.

namespaceTopics:          <Array of Object>
  - name: <string>
  - name: <string>

Optional values

The following values can optionally be set in the parent chart's values.yaml to set the other properties for namespaceTopics.

The owner property is used to control the ownership of the topic. The default value is yes, and you don't need to provide it if you are creating and owning the topic. If you are only creating role assignments for a topic you do not own, then you should explicitly set the owner flag to no so that only the role assignments are created on the existing topic.

namespaceTopics:
  - name: <string>
    owner: <string>                                    --Default yes     (Accepted values = yes, no)
    defaultMessageTimeToLive: <string>                 --Default P14D
    duplicateDetectionHistoryTimeWindow: <string>      --Default PT10M
    enableBatchedOperations: <bool>                    --Default true
    enableExpress: <bool>                              --Default false
    enablePartitioning: <bool>                         --Default false
    maxMessageSizeInKilobytes: <int>                   --Default 1024
    maxSizeInMegabytes: <int>                          --Default 1024
    requiresDuplicateDetection: <bool>                 --Default false
    supportOrdering: <bool>                            --Default true
    roleAssignments:
      - roleName: <string>                             --RoleName, e.g. TopicSender

NameSpace Topic: RoleAssignments

This template also optionally allows you to create role assignments by providing roleAssignments properties in the namespaceTopics object.

Below are the minimum values that are required to be set in the parent chart's values.yaml to create roleAssignments.

namespaceTopics:
  - name: <string>
    roleAssignments:                                    <Array of Object>
      - roleName: <string>                              --RoleName, e.g. TopicSender (Allowed values = 'TopicSender', 'TopicReceiver')
      - roleName: <string>

If you are only creating role assignments for a topic you do not own, then you should explicitly set the owner flag to no so that only the role assignments are created on the existing topic (see Example 2 in the Usage examples section).

NameSpace Topic: Subscriptions, SubscriptionRules

This template also optionally allows you to create topic subscriptions and topic subscription rules for a given topic by providing topicSubscriptions and topicSubscriptionRules properties in the topic object.

Below are the minimum values that are required to be set in the parent chart's values.yaml to create a NamespacesTopic, NamespacesTopicsSubscription and NamespacesTopicsSubscriptionsRule:

namespaceTopics:
  - name: <string>
    topicSubscriptions:                     <Array of Object>  (see "Optional values for topicSubscriptions" below for optional properties)
      - name: <string>
        topicSubscriptionRules:             <Array of Object>  (see "Optional values for topicSubscriptionRules" below)
          - name: <string>
            filterType: <string>            (Accepted values: 'CorrelationFilter' or 'SqlFilter')
            correlationFilter: <Object>     (see "Optional values for topicSubscriptionRules" below for correlationFilter properties)
            sqlFilter: <Object>             (see "Optional values for topicSubscriptionRules" below for sqlFilter properties)

To create topicSubscriptions inside already existing topics, set the owner property to no. By default, owner is set to yes, which creates the topic with the name defined in values (see Example 4 in the Usage examples section).

Optional values for topicSubscriptions

The following values can optionally be set in the parent chart's values.yaml to set the other properties for topicSubscriptions:

topicSubscriptions:
  - name: <string>
    deadLetteringOnFilterEvaluationExceptions: <bool>       --Default false
    deadLetteringOnMessageExpiration: <bool>                --Default false
    defaultMessageTimeToLive: <string>                      --Default P14D
    duplicateDetectionHistoryTimeWindow: <string>           --Default PT10M
    enableBatchedOperations: <bool>                         --Default true
    forwardTo: <string>
    isClientAffine: <bool>                                  --Default false
    lockDuration: <string>                                  --Default PT1M
    maxDeliveryCount: <int>                                 --Default 10
    requiresSession: <bool>                                 --Default false

Optional values for topicSubscriptionRules

The following values can optionally be set in the parent chart's values.yaml to set the other properties for topicSubscriptionRules:

topicSubscriptionRules:
  - name: <string>
    correlationFilter:
      contentType: <string>
      correlationId: <string>
      label: <string>
      messageId: <string>
      replyTo: <string>
      replyToSessionId: <string>
      sessionId: <string>
      to: <string>
    sqlFilter:
      sqlExpression: <string>

Usage examples

The following section provides usage examples for the NameSpace Topic template.

Example 1: ServiceA in TeamA creates the claim-notify topic with 1 role assignment

namespaceTopics:
  - name: claim-notify
    roleAssignments:
      - roleName: TopicSender

Example 2: ServiceB in TeamA needs to receive messages from the existing claim-notify topic. Note that owner is set to no.

namespaceTopics:
  - name: claim-notify
    owner: 'no'
    roleAssignments:
      - roleName: TopicReceiver

Example 3: ServiceA in TeamA creates a topic with 1 role assignment, a topic subscription and topic subscription rules.

namespaceTopics:
  - name: claim-notify
    roleAssignments:
      - roleName: TopicSender
    topicSubscriptions:
      - name: claim-notify-subscription-01
        topicSubscriptionRules:
          - name: claim-notify-subscription-rule-01
            filterType: SqlFilter
            sqlFilter:
              sqlExpression: "3=3"
          - name: claim-notify-subscription-rule-02
            filterType: CorrelationFilter
            correlationFilter:
              contentType: "testvalue"

Example 4: ServiceB in TeamA creates a topic subscription on an existing topic.

namespaceTopics:
  - name: claim-notify
    owner: 'no'
    roleAssignments:
      - roleName: TopicReceiver
    topicSubscriptions:
      - name: claim-notify-subscription-03

Database for Postgres Flexible Server template

  • Template file: _flexible-servers-db.yaml
  • Template name: adp-aso-helm-library.flexible-servers-db

An ASO FlexibleServersDatabase object.

A basic usage of this object template would involve the creation of templates/flexible-servers-db.yaml in the parent Helm chart (e.g. adp-microservice) containing:

{{- include "adp-aso-helm-library.flexible-servers-db" (list . "adp-microservice.postgres-flexible-db") -}}
{{- define "adp-microservice.postgres-flexible-db" -}}
# Microservice specific configuration in here
{{- end -}}

Required values

The following values need to be set in the parent chart's values.yaml, in addition to the globally required values listed above:

postgres:
  db:
    name: <string>
    charset: <string>
    collation: <string>

Please note that the Postgres DB name is prefixed with the namespace internally. For example, if the namespace name is "adp-microservice" and you have provided the DB name "demo-db", then on the Postgres server it creates a database with the name "adp-microservice-demo-db".

Usage examples

The following section provides usage examples for the Flexible-Servers-Db template.

Example 1: ServiceA in TeamA creates the payment database

postgres:
  db:
    name: payment
    charset: UTF8
    collation: en_US.utf8

UserAssignedIdentity

  • Template file: _userassignedidentity.yaml
  • Template name: adp-aso-helm-library.userassignedidentity

An ASO UserAssignedIdentity object to create a Microsoft.ManagedIdentity/userAssignedIdentities resource.

A basic usage of this object template would involve the creation of templates/userassignedidentity.yaml in the parent Helm chart (e.g. adp-microservice) containing:

{{- include "adp-aso-helm-library.userassignedidentity" . -}}

This template uses the below values, which are set using platform variables in the adp-flux-services repository as part of the service's ASO HelmRelease value configuration; you don't need to set them explicitly in the values.yaml file.

  • teamMIPrefix
  • serviceName
  • teamResourceGroupName
  • clusterOIDCIssuerUrl

The UserAssignedIdentity name is derived internally and is set to {TEAM_MI_PREFIX}-{SERVICE_NAME}.

For example, in SND1, if the TEAM_MI_PREFIX value is set to "sndadpinfmid1401" and the SERVICE_NAME value is set to "adp-demo-service", then the UserAssignedIdentity name will be "sndadpinfmid1401-adp-demo-service".

Optional values

The following values can optionally be set in the parent chart's values.yaml to set the other properties for the user-assigned identity:

userAssignedIdentity:
  location: <string>

This template also optionally allows you to create federated credentials for a given user-assigned identity by providing federatedCreds properties in the userAssignedIdentity object.

Below are the minimum values that are required to be set in the parent chart's values.yaml to create a userAssignedIdentity with federatedCreds.

userAssignedIdentity:
  federatedCreds:                      <Array of Object>
    - namespace: <string>
      serviceAccountName: <string>

Usage examples

The following section provides usage examples for the UserAssignedIdentity template.

Example 1: The below example will create a userAssignedIdentity with one federated credential.

userAssignedIdentity:
  federatedCreds:
    - namespace: ffc-demo
      serviceAccountName: ffc-demo

Storage Account

  • Template file: _storage-account.yaml
  • Template name: adp-aso-helm-library.storage-account

Version 2.0.0 and above

Starting from version 2.0.1, the Storage Account template has been enhanced with role assignments. These data role assignments are now scoped at the storage account level, introducing two new data roles: DataWriter and DataReader.

The DataWriter role grants applications the ability to both read and write data in blob containers, tables, and files. Conversely, the DataReader role provides applications with read-only access to data in blob containers, tables, and files.

An ASO StorageAccount object to create a Microsoft.Storage/storageAccounts resource and, optionally, the sub-resources Blob Containers and Tables.

📝 By default, private endpoints are always enabled on storage accounts and publicNetworkAccess is disabled. Optionally, you can also configure ipRules in scenarios where you want to limit access to your storage account to requests originating from specified IP addresses.

📝 Please be aware that this template only adds A records to the central DNS zone for the Dev, Tst, Pre, and Prd environments. For the sandpit environments snd1, snd2, and snd3, it currently only generates a private endpoint without adding an A record to the DNS zone; you will need to add this entry separately via a PowerShell script.

With this template, you can create the below resources:

  • Storage Accounts
  • Blob Containers and RoleAssignments
  • Tables and RoleAssignments

A basic usage of this object template would involve the creation of templates/storage-account.yaml in the parent Helm chart (e.g. adp-microservice) containing:

{{- include "adp-aso-helm-library.storage-account" . -}}

Default values for Storage Account

Below are the default values used by the storage account template internally; they cannot be overridden by the user from the values.yaml file.

kind: "StorageV2"             -- The type of storage account will always be "StorageV2"
dnsEndpointType: "Standard"   -- The type of endpoint
minimumTlsVersion: "TLS1_2"
allowBlobPublicAccess: "false"
sku: "Standard_LRS"           -- This is the sku for sandpit environments (snd1, snd2, snd3)
sku: "Standard_RAGRS"         -- This is the sku for production environments (dev, test, pre, prd)

Required values (Only Storage Account)

The following values need to be set in the parent chart's values.yaml, in addition to the globally required values listed above.

Note that storageAccounts is an array of objects that can be used to create more than one storage account.

Please note that the storage account name must be unique across Azure. The storage account name is internally prefixed with the storageAccountPrefix. For instance, in the Dev environment the storageAccountPrefix is configured as devadpinfst2401, so if you input "claim" as the storage account name, the final storage account name will be devadpinfst2401claim.

storageAccounts:          <Array of Object>
  - name: <string>        --Storage account name. Lowercase letters and numbers only; maximum character limit is 9
  - name: <string>

Required values (Storage Account with BlobContainers, Tables and FileShare)

+

The following values need to be set in the parent chart's values.yaml in addition to the globally required values listed above.

+
Version 2.0.0 and above

storageAccounts:           <Array of Object>
  - name: <string>         --Storage account name. Lowercase letters and numbers only; maximum character limit is 9
    roleAssignments:
      - roleName: <string>        --RoleAssignment name (Accepted values = "DataWriter", "DataReader")
  - name: <string>
    blobContainers:
      - name: <string>            --Blob container name. Lowercase letters, numbers, and the hyphen/minus (-) character only. Character limit: 3-63
      - name: <string>
    tables:
      - name: <string>            --Table name. Lowercase alphanumeric characters only. Character limit: 3-63
      - name: <string>
    fileShares:
      - name: <string>            --File share name. Lowercase alphanumeric characters only. Character limit: 3-63
      - name: <string>
        accessTier: <string>      --Access tier. Allowed values are TransactionOptimized, Hot, Cold. Default is TransactionOptimized
        shareQuota: <int>         --Storage quota in GiB. Default is 10
Version 1.0.*

storageAccounts:           <Array of Object>
  - name: <string>         --Storage account name. Lowercase letters and numbers only; maximum character limit is 9
  - name: <string>
    blobContainers:
      - name: <string>            --Blob container name. Lowercase letters, numbers, and the hyphen/minus (-) character only. Character limit: 3-63
        roleAssignments:
          - roleName: <string>    --RoleAssignment name (Accepted values = "BlobDataContributor", "BlobDataReader")
      - name: <string>
        roleAssignments:
          - roleName: <string>
    tables:
      - name: <string>            --Table name. Lowercase alphanumeric characters only. Character limit: 3-63
        roleAssignments:
          - roleName: <string>    --RoleAssignment name (Accepted values = "TableDataContributor", "TableDataReader")
      - name: <string>
        roleAssignments:
          - roleName: <string>
    fileShares:
      - name: <string>            --File share name. Lowercase alphanumeric characters only. Character limit: 3-63
      - name: <string>
        accessTier: <string>      --Access tier. Allowed values are TransactionOptimized, Hot, Cold. Default is TransactionOptimized
        shareQuota: <int>         --Storage quota in GiB. Default is 10

Optional values

The following values can optionally be set in the parent chart's values.yaml to set the other properties for storageAccounts:

For a detailed description of each property, see here.

The owner property controls ownership of the storage account. The default value is yes; you don't need to provide it if you are creating and own the storage account. If you are creating Blob containers or Tables on an existing storage account that you do not own, explicitly set the owner flag to no so that only the Blob containers or Tables are created on the existing storage account.

storageAccounts:
  - name: <string>
    owner: <string>                                     --Default "yes"     (Accepted values = "yes", "no")
    location: <string>                                  --Default "uksouth"
    accessTier: <string>                                --Default "Hot"     (Accepted values = "Hot", "Cool")
    allowCrossTenantReplication: <bool>                 --Default false
    allowSharedKeyAccess: <bool>                        --Default false
    defaultToOAuthAuthentication: <string>              --Default "true"    (Accepted values = "true", "false")
    networkAcls:
      ipRules: <array>                                  --Storage Firewall: sets the IP ACL rules
    storageAccountsBlobService:                         --Configure properties for the blob service
      changeFeed:                                       --The blob service properties for change feed events
        enabled: <bool>                                 --Default false
        retentionInDays: <int>                          --Applies when changeFeed.enabled is set to true
      containerDeleteRetentionPolicy:                   --The blob service properties for container soft delete
        enabled: <bool>                                 --Default true
        days: <int>                                     --Applies when containerDeleteRetentionPolicy.enabled is set to true. Default is 7 days
      deleteRetentionPolicy:                            --The blob service properties for blob soft delete
        enabled: <bool>                                 --Default true
        days: <int>                                     --Applies when deleteRetentionPolicy.enabled is set to true. Default is 7 days
        allowPermanentDelete: <bool>                    --Default false
      isVersioningEnabled: <bool>                       --Default false. Versioning is enabled if set to true
      restorePolicy:                                    --The blob service properties for blob restore policy
        enabled: <bool>                                 --Default false
        days: <int>                                     --Applies when restorePolicy.enabled is set to true
    storageAccountsFileService:                         --Configure properties for the file service
      deleteRetentionPolicy:                            --The file service properties for file share soft delete
        enabled: <bool>                                 --Default true
        days: <int>                                     --Applies when deleteRetentionPolicy.enabled is set to true. Default is 7 days
    blobContainers:                                     --List of Blob containers and role assignments
      - name: <string>                                  --Blob container name
        roleAssignments:                                --List of roleAssignments scoped to the blob container
          - roleName: <string>                          --RoleAssignment name (Accepted values = "BlobDataContributor", "BlobDataReader")
    tables:                                             --List of Tables and role assignments
      - name: <string>                                  --Table name
        roleAssignments:                                --List of roleAssignments scoped to the table
          - roleName: <string>                          --RoleAssignment name (Accepted values = "TableDataContributor", "TableDataReader")

Usage examples

The following section provides usage examples for the storage account template.
Example 1: Create 2 storage accounts

storageAccounts:
  - name: storage01
  - name: storage02
Example 2: Create 1 storage account using a large parameter set and the storage firewall

storageAccounts:
  - name: storage01
    accessTier: Hot
    location: uksouth
    allowCrossTenantReplication: false
    allowSharedKeyAccess: true
    defaultToOAuthAuthentication: "false"
    networkAcls:
      ipRules:
      - 82.13.86.001
      - 82.13.86.002
Example 3: Create 1 storage account and configure properties for the Storage Account BlobService

storageAccounts:
  - name: storage01
    storageAccountsBlobService:
      changeFeed:
        enabled: true
        retentionInDays: 14
      containerDeleteRetentionPolicy:
        days: 14
        enabled: true
      deleteRetentionPolicy:
        allowPermanentDelete: true
        days: 20
        enabled: false
      isVersioningEnabled: true
      restorePolicy:
        enabled: true
        days: 40
Example 4: Create 1 storage account with 2 blob containers and 1 table with role assignments (Version 1.0.*)

storageAccounts:
  - name: storage01
    blobContainers:
      - name: container-01
        roleAssignments:
          - roleName: 'BlobDataContributor'
      - name: container-02
        roleAssignments:
          - roleName: 'BlobDataReader'
    tables:
      - name: table01
        roleAssignments:
          - roleName: 'TableDataContributor'
Example 5: Create 1 blob container and 2 tables on an existing storage account in your team

storageAccounts:
  - name: storage01
    owner: "no"               --Note owner is set to 'no' to indicate the storage account already exists and is owned by a different service in the team
    blobContainers:
      - name: container-01
    tables:
      - name: table01
      - name: table02
Example 6: Create 1 storage account and configure properties for the Storage Account FileService

storageAccounts:
  - name: storage01
    storageAccountsFileService:
      deleteRetentionPolicy:
        enabled: true
        days: 20
Example 7: Create 2 file shares, one with default properties and one with specific properties

storageAccounts:
  - name: storage01
    owner: "no"               --Note owner is set to 'no' to indicate the storage account already exists and is owned by a different service in the team
    fileShares:
      - name: share-01
      - name: share-02
        accessTier: Hot
        shareQuota: 50
Example 8: Create 1 storage account with 2 blob containers and role assignments scoped to the storage account (Version 2.0.*)

storageAccounts:
  - name: storage01
    roleAssignments:
      - roleName: 'DataWriter'
    blobContainers:
      - name: container-01
      - name: container-02
Example 9: Create role assignments for an existing storage account in the team (Version 2.0.*)

storageAccounts:
  - name: storage01
    owner: 'no'
    roleAssignments:
      - roleName: 'DataReader'

Reference Table for Resource Names in Azure and Kubernetes

The table below shows the Azure Service Operator (ASO) resource naming convention in Azure and Kubernetes.

In the example below, the following platform values are used for demonstration purposes:

  • namespace = 'ffc-demo'
  • serviceName = 'ffc-demo-web'
  • teamMIPrefix = 'sndadpinfmi1401'
  • storageAccountPrefix = 'sndadpinfst1401'
  • privateEndpointPrefix = 'sndadpinfpe1401'
  • postgresServerName = 'sndadpdbsps1401'
  • userassignedidentityName = 'sndadpinfmi1401-ffc-demo-web'

And the following user input values are used for demonstration purposes:

  • QueueName = 'queue01'
  • TopicName = 'topic01'
  • TopicSubName = 'topicSub01'
  • DatabaseName = 'claim'
  • StorageAccountName = 'demo'
| Resource Type | Resource Name Format in Azure | Resource Name Example in Azure | Resource Name Format in Kubernetes | Resource Name Example in Kubernetes |
| --- | --- | --- | --- | --- |
| NamespacesQueue | {namespace}-{QueueName} | ffc-demo-queue01 | {namespace}-{QueueName} | ffc-demo-queue01 |
| Queue RoleAssignment | NA | NA | {userassignedidentityName}-{QueueName}-{RoleName}-rbac-{index} | sndadpinfmi1401-ffc-demo-web-ffc-demo-queue01-queuereceiver-rbac-0 |
| NamespacesTopic | {namespace}-{TopicName} | ffc-demo-topic01 | {namespace}-{TopicName} | ffc-demo-topic01 |
| NamespacesTopicsSubscription | {namespace}-{TopicSubName} | ffc-demo-topicSub01 | {namespace}-{TopicName}-{TopicSubName}-subscription | ffc-demo-topic01-topicsub01-subscription |
| Topic RoleAssignment | NA | NA | {userassignedidentityName}-{TopicName}-{RoleName}-rbac-{index} | sndadpinfmi1401-ffc-demo-web-ffc-demo-topic01-topicreceiver-rbac-0 |
| Postgres Database | {namespace}-{DatabaseName} | ffc-demo-claim | {postgresServerName}-{namespace}-{DatabaseName} | sndadpdbsps1401-ffc-demo-claim |
| Managed Identity | {teamMIPrefix}-{serviceName} | sndadpinfmi1401-ffc-demo-web | {teamMIPrefix}-{serviceName} | sndadpinfmi1401-ffc-demo-web |
| StorageAccount | {storageAccountPrefix}{StorageAccountName} | sndadpinfst1401demo | {serviceName}-{StorageAccountName} | ffc-demo-web-sndadpinfst1401demo |
| StorageAccountsBlobService | default | default | {serviceName}-{StorageAccountName}-default | ffc-demo-web-sndadpinfst1401demo-default |
| StorageAccountsBlobServicesContainer | {ContainerName} | container-01 | {serviceName}-{StorageAccountName}-default-{ContainerName} | ffc-demo-web-sndadpinfst1401demo-default-container-01 |
| StorageAccountsTableServicesTable | {TableName} | table01 | {serviceName}-{StorageAccountName}-default-{TableName} | ffc-demo-web-sndadpinfst1401demo-default-table01 |
| PrivateEndpoint | {privateEndpointPrefix}-{ResourceName}-{SubResource} | sndadpinfpe1401-sndadpinfst1401demo-blob | {privateEndpointPrefix}-{ResourceName}-{SubResource} | sndadpinfpe1401-sndadpinfst1401demo-blob |
| PrivateEndpointsPrivateDnsZoneGroup | default | default | {PrivateEndpointName}-default | sndadpinfpe1401-sndadpinfst1401demo-blob-default |

Helper templates

In addition to the K8s object templates described above, a number of helper templates are defined in _helpers.tpl; these are both used within the library chart and available for use within a consuming parent chart.

Default check required message

  • Template name: adp-aso-helm-library.default-check-required-msg
  • Usage: {{- include "adp-aso-helm-library.default-check-required-msg" . }}

A template defining the default message to print when checking for a required value within the library. This is not designed to be used outside of the library.

Tags

  • Template name: adp-aso-helm-library.commonTags
  • Usage: {{- include "adp-aso-helm-library.commonTags" $ | nindent 4 }} ($ is mapped to the root scope)

Common tags that are applied to all ASO resource objects on the ADP K8s platform. This template relies on the globally required values listed above.

Labels

In Progress.

Annotations

To prevent the Azure Service Operator from deleting the resources created in Azure when the Kubernetes resource manifest files are deleted, the section below can be added to the values.yaml of the parent Helm chart.

This specifies the reconcile policy to be used and can be set to manage, skip or detach-on-delete. More information is available here.

asoAnnotations:
  serviceoperator.azure.com/reconcile-policy: detach-on-delete

Licence


THIS INFORMATION IS LICENSED UNDER THE CONDITIONS OF THE OPEN GOVERNMENT LICENCE found at:


http://www.nationalarchives.gov.uk/doc/open-government-licence/version/3


The following attribution statement MUST be cited in your products and applications when using this information.


Contains public sector information licensed under the Open Government license v3


About the licence


The Open Government Licence (OGL) was developed by the Controller of Her Majesty's Stationery Office (HMSO) to enable information providers in the public sector to license the use and re-use of their information under a common open licence.


It is designed to encourage use and re-use of information freely and flexibly, with only a few conditions.


Helm Library - Memory and CPU Tiers

We have implemented an abstraction layer within the adp-helm-library that allows dynamic allocation of memory and CPU resources based on the memory/CPU tier provided in the values.yaml file.

The memory and CPU tier values are shown in the table below:

| TIER | CPU-REQUEST | CPU-LIMIT | MEMORY-REQUEST | MEMORY-LIMIT |
| --- | --- | --- | --- | --- |
| S | 50m | 50m | 50Mi | 50Mi |
| M | 100m | 100m | 100Mi | 100Mi |
| L | 150m | 150m | 150Mi | 150Mi |
| XL | 200m | 200m | 200Mi | 200Mi |
| XXL | 300m | 600m | 300Mi | 600Mi |
| CUSTOM | <?> | <?> | <?> | <?> |

Instructions

The following values can optionally be set in a values.yaml to select the required CPU and memory for a container:

container:
  memCpuTier: <string S|M|L|XL|XXL|CUSTOM>
  requestMemory: <string - REQUIRED if memCpuTier is CUSTOM>
  requestCpu: <string - REQUIRED if memCpuTier is CUSTOM>
  limitMemory: <string - REQUIRED if memCpuTier is CUSTOM>
  limitCpu: <string - REQUIRED if memCpuTier is CUSTOM>

Example 1 - select an Extra Large (XL) tier:

container:
  memCpuTier: XL

Example 2 - select a Small (S) tier:

container:
  memCpuTier: S

Example 3 - select CUSTOM and provide your own values if the tier sizes don't fit your requirements:

container:
  memCpuTier: CUSTOM
  requestMemory: 10Mi
  requestCpu: 10m
  limitMemory: 200Mi
  limitCpu: 200m

Example 4 - the default is Medium (M); if this works for you then you don't need to pass a memCpuTier:

container: {}

NOTE: If you do not add a memCpuTier then the tier will default to 'M'.

NOTE: You can also choose CUSTOM and provide your own values if the tier sizes don't fit your requirements. If you choose CUSTOM, then requestMemory, requestCpu, limitMemory and limitCpu are required.

IMPORTANT: Your team namespace will be given a fixed amount of resources via ResourceQuotas. Once your cumulative resource request total passes the assigned quota on your namespace, all further deployments will fail. If you require an increase to your ResourceQuota, you will need to raise a request via the ADP team. It's important that you monitor the performance of your application and adjust pod requests and limits accordingly. Please choose the appropriate CPU and memory tier for your application, or provide custom values for your CPU and memory requests and limits.
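For context, namespace quotas of this kind are enforced through Kubernetes ResourceQuota objects. The sketch below uses illustrative figures only - the name and the actual ADP allocations are assumptions:

apiVersion: v1
kind: ResourceQuota
metadata:
  name: team-quota        # hypothetical name
  namespace: ffc-demo     # example team namespace
spec:
  hard:
    requests.cpu: "2"     # illustrative values only; actual ADP quotas may differ
    requests.memory: 4Gi
    limits.cpu: "4"
    limits.memory: 8Gi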

References

https://learn.microsoft.com/en-us/azure/aks/developer-best-practices-resource-management

https://learn.microsoft.com/en-us/azure/aks/operator-best-practices-scheduler#enforce-resource-quotas


ADP Quality Assurance Approach

This document outlines the QA approach for the Azure Developer Platform (ADP). The objective of quality assurance is to ensure that all business applications developed and hosted on the ADP meet DEFRA's standards of quality, reliability and performance.

The Quality Assurance approach follows the traditional QA Pyramid, which models how software testing is categorised and layered.

ADP Testing Pyramid

Guidelines

  • All testing tooling used must have been approved by the DEFRA Tools Authority.
  • Test results must conform to the agreed DEFRA standards, e.g. 90% code coverage for unit tests.

Selected Tools

Below are the tools that are currently supported on the ADP:

| Type of Test | Tooling |
| --- | --- |
| Unit Testing | C#: NUnit/xUnit, NSubstitute; NodeJS: Jest |
| Functional/Acceptance | WebDriver.IO |
| Security Testing | OWASP ZAP (Zed Attack Proxy) |
| API Testing (Contract Testing) | PACT Broker |
| Accessibility Testing | AXE, Lighthouse? |
| Performance Testing | JMeter, BrowserStack. Azure Load Testing is under consideration |
| Exploratory Testing (Manual) | ADO Test Plans |

How to create Tests in the ADP

Development teams use the ADP Portal to scaffold a new service using one of the exemplar software templates (refer to How to create a platform service). Based on the template type (frontend or backend), basic tests will be included that the teams can build on as they add more functionality to the service.

The ADP Platform provides the ability to execute the above tests, which run as post-deployment tests. The pipeline checks for the existence of specific docker-compose test files to determine whether it can run tests; refer to the how-to guides for the different types of tests.
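A minimal sketch of what such a compose file might look like - the file name, service layout and variable name below are assumptions, so check the how-to guides for the exact conventions the pipeline expects:

# docker-compose.acceptance.yaml -- hypothetical file name and layout
services:
  acceptance-tests:
    build:
      context: ./test/acceptance                               # assumed location of the test suite
    environment:
      TEST_ENVIRONMENT_ROOT_URL: ${TEST_ENVIRONMENT_ROOT_URL}  # assumed variable name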

However, it is the responsibility of the delivery projects to ensure that the business services they deliver have sufficient tests of each type to meet DEFRA's standards.

Unit Tests

The supported programming frameworks are .NET/C# and NodeJS/JavaScript.

The unit tests are executed in the CI Build Pipeline. SonarQube analysis has been integrated into the ADP Core Pipeline Template to ensure the code conforms to the DEFRA quality standards.

Links to the SonarCloud and Snyk analyses will be available on the component page of the service in the ADP Portal.

Functional/Acceptance Testing

These are end-to-end tests that run against internal (via OpenVPN) or public endpoints for frontends and APIs.

Refer to the Guide on how to create an Acceptance Test.

Performance Testing

These tests should be executed against internal (via OpenVPN) or public endpoints for frontends and APIs. Docker is used with BrowserStack to execute the performance tests.

As a pre-requisite, Non-Functional Requirements should be defined by the delivery project to set the baseline for the expected behaviour, e.g. expected average API response time and page load duration.

There are various types of performance test:

  • Load tests assess the performance of the service under typical and peak load.
  • Stress load tests are intended to test the limits of the service.
  • Spike tests are similar to stress load tests; however, they test the service with sudden surges in traffic.
  • Soak tests verify the reliability of the system over a long period of time.

Refer to the Guide on how to create a Performance Test

Accessibility Testing

These tests verify that all DEFRA public websites/business services comply with the WCAG 2.2 AA accessibility standard.

Refer to the guidance on Understanding accessibility requirements for public sector bodies.

Security Testing

SonarQube security testing has been incorporated into the CI Build Pipeline. In addition, OWASP ZAP is executed as part of the post-deployment tests.


Secret Management

ADP Services Secret Management

The secrets in ADP services are managed using Azure Key Vault. The secrets for each individual service are imported into the Azure Key Vault through ADO Pipeline tasks and are accessed by the services using the application configuration YAML files. Importing secrets into the Key Vault and referencing them from individual services are both automated using ADO Pipelines. The detailed workflow for secret management includes the following steps:

1. Configure ADO Library

  • Create variable groups for each environment of the service in the ADO Library.

    Naming Convention: the following convention is used for naming the variable groups of a service:

      {service name}-{env}

  • Example: The variable groups for the different environments of a service are shown below.

    image.png

  • Add the variables and the values for the secrets in each of the variable groups.

    Variable Naming Convention: the following convention is used for naming the variables in the variable groups:

      {service name}-{variable name}

  • Example: Secret variables for a service are shown below.

    image.png

2. ADO Pipeline - Import secrets to Key Vault

The import of the variables and values from the variable groups into the Azure Key Vault is automated using pipeline tasks and PowerShell scripts.

  • Repo: ADO-PIPELINE-COMMON

  • Example: The code snippet involved in importing the secrets to the Azure Key Vault is shown below.

    image.png

3. Azure Key Vault - Imported secrets

After the secrets are added to the ADO Library variable groups, a successful run of the service CI pipeline imports them into the Key Vault as shown below.

  • Example: Secrets imported to the Key Vault for a service are shown below.

    image.png

4. App Config

The services access their secrets from the Key Vault through appConfig YAML files included in each service. There are two different kinds of appConfig files.

  • Environment-specific appConfig file: Each service has its own environment-specific appConfig file to access its respective secrets from the Key Vault.

    File Naming Convention:

      appConfig.{environment}.yaml

  • Common appConfig file: There is a common appConfig.yaml file included in each service that defines the environment variables commonly used by all of the service's environments.

  • Example: The appConfig files for the different environments of a service are shown below.

    image.png

Any variable (key) that references a secret from the Key Vault should be defined with type: "keyvault" in the config YAML file.
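A minimal sketch of such an entry, assuming a hypothetical key name and schema - refer to an existing service's appConfig files for the authoritative format:

# appConfig.snd.yaml -- hypothetical entry
- key: ffc-demo-web-cookie-password    # assumed name following {service name}-{variable name}
  type: "keyvault"                     # resolve this value from the Azure Key Vault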

5. ADO Pipeline - Import App Config

The pipeline tasks shown below use the environment-specific appConfig YAML files to import the secrets from the Azure Key Vault into the service.

  • Repo: ADO-PIPELINE-COMMON

image.png

6. Run Pipeline - appConfig only

The secrets can be added to the Key Vault and referenced by the service using the appConfig files alone. This can be achieved by running the pipeline with the Deploy App Config check box selected, which runs only the secret management tasks instead of all the tasks in the pipeline. This is useful when updating the secrets of a service.


image.png


Azure Developer Platform Portal (ADP)

The Azure Developer Platform Portal is built using Backstage.

The Portal enables users to self-serve by providing the functionality below:

  • Onboard delivery programmes, projects and users onto the ADP Platform
  • Enable developers to create microservices using software templates based on GDS standards

Getting Started

To get the project set up locally, install dependencies and then build and run the application:

yarn install
yarn dev

ADP Data Plugin

Overview

ADP enables authorised users to self-service through the platform, allowing them to create and manage the required arms-length bodies, delivery programmes, and delivery teams. The data added can subsequently be edited by those authorised users and viewed by all.

The diagram below illustrates the high-level process flow of user journeys, distinguishing between four types of users: ADP Admins, Programme Managers, Project Managers, and Project Developers. ADP Admins have the authority to create new ALBs (Arms-Length Bodies) and initially seed Programme Managers. Programme Managers are able to onboard additional Programme and Project Managers, as well as to create Delivery Programmes and Projects. Project Managers have the capability to create new Delivery Projects and onboard Delivery Project Members. Finally, Project Developers are tasked with creating and managing platform services.

ADP Data Process Flow

Portal Permissions

In the table below you can see the permissions per ADP persona. Please note that users are not restricted to one role/persona: a single person may be a Programme Manager, a Team Manager for a team in their programme, and a developer within that team.

ADP Data Permissions

Backend APIs

ALB

Endpoint: /armsLengthBody

| Method | Parameters | Example Request Body | Example Response |
| --- | --- | --- | --- |
| GET | N/A | | [{ "creator":"user:default/johnDoe", "owner":"owner value", "title":"ALB 1", "alias":"ALB", "description": "ALB description", "url": null, "name":"alb-1", "id": "123", "created_at": "2024-02-26T15:58:40.337Z", "updated_at": "2024-02-26T15:58:40.337Z", "updated_by": "user:default/johnDoe"}, { "creator":"user:default/johnDoe", "owner":"owner value", "title":"ALB 2", "alias":"ALB", "description": "ALB description", "url": null, "name":"alb-2", "id": "1234", "created_at": "2024-02-26T15:58:40.337Z", "updated_at": "2024-02-26T15:58:40.337Z", "updated_by": "user:default/johnDoe"}] |
| GET | id | N/A | { "creator":"user:default/johnDoe", "owner":"owner value", "title":"ALB 1", "alias":"ALB", "description": "ALB description", "url": null, "name":"alb-1", "id": "123", "created_at": "2024-02-26T15:58:40.337Z", "updated_at": "2024-02-26T15:58:40.337Z", "updated_by": "user:default/johnDoe"} |
| POST | | { "title": "ALB", "description": "ALB Description" } | { "title": "ALB", "description": "ALB Description", "url": null, "alias": null, "name": "alb", "creator":"user:default/johnDoe", "owner":"owner value", "id": "12345", "created_at": "2024-02-26T15:58:40.337Z", "updated_at": "2024-02-26T15:58:40.337Z", "updated_by": "user:default/johnDoe"} |
| PATCH | id | { "id": "12345", "title": "Updated ALB Title" } | { "title": "Updated ALB Title", "description": "ALB Description", "url": null, "alias": null, "name": "alb", "creator":"user:default/johnDoe", "owner":"owner value", "id": "12345", "created_at": "2024-02-26T15:58:40.337Z", "updated_at": "2024-02-26T15:58:40.337Z", "updated_by": "user:default/johnDoe"} |
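As an illustration, creating an ALB via this endpoint might look like the following; the host and base path are assumptions and depend on where the plugin's backend is mounted in your portal deployment:

# Hypothetical host and base path - substitute the values for your deployment
curl -X POST "https://<portal-host>/<plugin-base-path>/armsLengthBody" \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer <token>" \
  -d '{ "title": "ALB", "description": "ALB Description" }'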

Endpoint: /armsLengthBodyNames

| Method | Example Response |
| --- | --- |
| GET | {"123": "ALB 1", "1234": "ALB 2", "12345": "ALB 3", "123456": "ALB 4"} |

Delivery Programmes


Endpoint: /deliveryProgramme

| Method | Parameters | Example Request Body | Example Response |
| --- | --- | --- | --- |
| GET | N/A | | [{ "id": "123", "programme_managers":[], "arms_length_body_id": "12345", "title": "Delivery Programme 1", "name": "delivery-programme-1", "alias": "Delivery Programme", "description": "Delivery Programme description", "finance_code": "1", "delivery_programme_code": "123", "url": "exampleurl.com", "created_at": "2024-02-26T15:58:40.337Z", "updated_at": "2024-02-26T15:58:40.337Z", "updated_by": "user:default/johnDoe"}, { "id": "1234", "programme_managers":[], "arms_length_body_id": "12345", "title": "Delivery Programme 2", "name": "delivery-programme-2", "alias": "Delivery Programme", "description": "Delivery Programme description", "finance_code": "1", "delivery_programme_code": "123", "url": "exampleurl.com", "created_at": "2024-02-26T15:58:40.337Z", "updated_at": "2024-02-26T15:58:40.337Z", "updated_by": "user:default/johnDoe"}] |
| GET | id | N/A | { "id": "123", "programme_managers":[{"aad_entity_ref_id": "123", "id": "1", "delivery_programme_id":"123", "email": "email@example.com", "name": "John Doe"}], "arms_length_body_id": "12345", "title": "Delivery Programme 1", "name": "delivery-programme-1", "alias": "Delivery Programme", "description": "Delivery Programme description", "finance_code": "1", "delivery_programme_code": "123", "url": "exampleurl.com", "created_at": "2024-02-26T15:58:40.337Z", "updated_at": "2024-02-26T15:58:40.337Z", "updated_by": "user:default/johnDoe"} |
| POST | | { "programme_managers":[{"aad_entity_ref_id": "123"}, {"aad_entity_ref_id": "1234"}], "arms_length_body_id": "12345", "title": "Delivery Programme", "alias": "Delivery Programme", "description": "Delivery Programme description", "finance_code": "1", "delivery_programme_code": "123", "url": "exampleurl.com" } | { "id": "1234", "programme_managers":[], "arms_length_body_id": "12345", "title": "Delivery Programme", "name": "delivery-programme", "alias": "Delivery Programme", "description": "Delivery Programme description", "finance_code": "1", "delivery_programme_code": "123", "url": "exampleurl.com", "created_at": "2024-02-26T15:58:40.337Z", "updated_at": "2024-02-26T15:58:40.337Z", "updated_by": "user:default/johnDoe"} |
| PATCH | | { "id": "1234", "title": "Updated Delivery Programme Title" } | { "id": "1234", "programme_managers":[], "arms_length_body_id": "12345", "title": "Updated Delivery Programme Title", "name": "delivery-programme", "alias": "Delivery Programme", "description": "Delivery Programme description", "finance_code": "1", "delivery_programme_code": "123", "url": "exampleurl.com", "created_at": "2024-02-26T15:58:40.337Z", "updated_at": "2024-02-26T15:58:40.337Z", "updated_by": "user:default/johnDoe"} |

Endpoint: /programmeManager

| Method | Example Response |
| --- | --- |
| GET | [{"id": "5464de88-bc76-4a0b-a491-77284c392dab", "delivery_programme_id": "0bd0cb6b-569a-4c0f-bc6d-5b8708f45c4a", "aad_entity_ref_id": "aad entity ref id 1", "email": "example@defra.onmicrosoft.com", "name": "name 1"}, {"id": "f0bca259-d0a2-4d30-8166-4569f8e7b6f2", "delivery_programme_id": "0bd0cb6b-569a-4c0f-bc6d-5b8708f45c4a", "aad_entity_ref_id": "aad entity ref id 2", "email": "example@defra.onmicrosoft.com", "name": "name 2"}] |

Endpoint: /catalogEntities

| Method | Example Response |
| --- | --- |
| GET | {"items": [ {"metadata": { "name": "example.onmicrosoft.com", "annotations": {"graph.microsoft.com/user-id": "aad entity ref id 1", "microsoft.com/email": "example@defra.onmicrosoft.com"}}, "spec": {"profile": {"displayName": "name 1"}}}, {"metadata": {"name": "example.onmicrosoft.com", "annotations": {"graph.microsoft.com/user-id": "aad entity ref id 2", "microsoft.com/email": "example@defra.onmicrosoft.com"}}, "spec": {"profile": {"displayName": "name 2"}}}]} |

Delivery Projects

Endpoint: /deliveryProject

| Method | Parameters | Example Request Body | Example Response |
| --- | --- | --- | --- |
| GET | N/A | | [{"id": "123", "name": "delivery-project-1", "title": "Delivery Project 1", "alias": "Delivery Project", "description": "Delivery Project Description", "finance_code": "", "delivery_programme_id": "1", "delivery_project_code": "1", "url": "", "ado_project": "", "created_at": "2024-04-03T06:41:56.257Z", "updated_at": "2024-04-03T08:42:48.242Z", "updated_by": "user:default/johnDoe.com"}, {"id": "1234", "name": "delivery-project-2", "title": "Delivery Project 2", "alias": "Delivery Project", "description": "Delivery Project Description", "finance_code": "", "delivery_programme_id": "2", "delivery_project_code": "2", "url": "", "ado_project": "", "created_at": "2024-04-03T05:42:31.914Z", "updated_at": "2024-04-03T08:43:03.622Z", "updated_by": "user:default/johnDoe"}] |
| GET | id | N/A | {"id": "1234", "name": "delivery-project-2", "title": "Delivery Project 2", "alias": "Delivery Project", "description": "Delivery Project Description", "finance_code": "", "delivery_programme_id": "2", "delivery_project_code": "2", "url": "", "ado_project": "", "created_at": "2024-04-03T05:42:31.914Z", "updated_at": "2024-04-03T08:43:03.622Z", "updated_by": "user:default/johnDoe"} |
| POST | | {"title": "Delivery Project 3", "alias": "Delivery Project", "description": "Delivery Project Description", "finance_code": "", "delivery_programme_id": "3", "delivery_project_code": "3", "url": "", "ado_project": ""} | {"id": "12345", "name": "delivery-project-3", "title": "Delivery Project 3", "alias": "Delivery Project", "description": "Delivery Project Description", "finance_code": "", "delivery_programme_id": "3", "delivery_project_code": "3", "url": "", "ado_project": "", "created_at": "2024-04-03T05:42:31.914Z", "updated_at": "2024-04-03T08:43:03.622Z", "updated_by": "user:default/johnDoe"} |
| PATCH | | { "id": "12345", "title": "Updated Delivery Project Title" } | {"id": "12345", "name": "delivery-project-3", "title": "Updated Delivery Project Title", "alias": "Delivery Project", "description": "Delivery Project Description", "finance_code": "", "delivery_programme_id": "3", "delivery_project_code": "3", "url": "", "ado_project": "", "created_at": "2024-04-03T05:42:31.914Z", "updated_at": "2024-04-03T08:43:03.622Z", "updated_by": "user:default/johnDoe"} |

Backstage Plugin Index

We can extend Backstage's functionality by creating and installing plugins. Plugins can either be created by us (1st party) or we can install 3rd party plugins. The majority of 3rd party plugins are free and open source; however, there are some exceptions.

This page tracks the plugins we have installed and the plugins we would like to evaluate.

Considerations for 3rd party plugins

  • Does it bring value to the portal? Will it help the user complete a task more easily than if the plugin had not been installed? Will it offer the user more context about a software component?
  • Is it actively maintained? Are bugs regularly addressed? Are the maintainers responsive to new issues, pull requests, etc.?
  • Is it compatible with the version of Backstage we're using? Backstage is updated regularly and plugins need to keep up. As we maintain and update Backstage we will need to keep any plugins we're using up to date - and the more plugins we have installed, the more of a headache this will be!
  • What are the licencing requirements? Most 3rd party plugins are licensed under a FOSS licence; however, there are a few commercial plugins out there for which we would need a business case.
  • How does it integrate with other systems? If the plugin needs to integrate with an external service through OAuth, a service principal, etc., what needs to be configured and what permissions does the plugin need?

Plugins index

| Plugin | Category | Status | Author | Description |
| --- | --- | --- | --- | --- |
| azure-devops | Catalog | Implemented | Backstage | Displays pipeline runs on component entity pages. We're not using the repos or README features. Requires components to have two annotations - dev.azure.com/project contains the ADO project name and dev.azure.com/build-definition contains the pipeline name. |
| GitHub pull requests | Catalog | Implemented | Roadie | Adds a dashboard displaying GitHub pull requests on component entity pages. Requires components to have the github.com/project-slug annotation in their catalog-info file. |
| Grafana dashboard | Catalog | Implemented | K-Phoen | Displays Grafana alerts and dashboards for a component. Note that we cannot use the Dashboard embed - Managed Grafana does not allow us to configure embedding. |
| Azure DevOps scaffolder actions | Scaffolder | Implemented | ADP | Custom scaffolder actions to get service connections, create and run pipelines, and permit access to ADO resources. Loosely based on the 3rd party package by Parfumerie Douglas. |
| GitHub scaffolder actions | Scaffolder | Implemented | ADP | Custom scaffolder actions to create GitHub teams and assign them to repositories. |
| Lighthouse | Catalog | Agreed | Spotify | Generates on-demand Lighthouse audits and tracks trends directly in Backstage. Helps to improve accessibility and performance and adhere to best practices. Requires a PostgreSQL database and a running Lighthouse instance of the lighthouse-audit-service API, which executes the tests before sending results back to the plugin. |
| SonarQube | Catalog | Agreed | SDA-SE | Adds frontend visualisation of code statistics from SonarCloud or SonarQube. Requires a SonarCloud subscription. |
| Prometheus | Catalog | Agreed | Roadie | Adds embedded Prometheus graphs and alerts into Backstage. Requires setting up a new proxy endpoint for the Prometheus API in the app-config.yaml. |
| Flux | Catalog | Agreed | Weaveworks | The Flux plugin for Backstage provides views of Flux resources available in Kubernetes clusters. |
| Kubernetes | Catalog | Agreed | Spotify | Kubernetes in Backstage is a tool that's designed around the needs of service owners, not cluster admins. Developers can easily check the health of their services no matter how or where those services are deployed - whether it's on a local host for testing or in production on dozens of clusters around the world. |
| Snyk | Catalog | Assess | Snyk | The Snyk Backstage plugin leverages the Snyk API to enable Snyk data to be visualised directly in Backstage. |
| KubeCost | Catalog | Assess | SuXess-IT | Kubecost is a plugin to help engineers get information about the cost usage/prediction of their deployments. Some development work is needed around namespaces. It doesn't look like it is maintained or updated regularly. |

Key

| Status | Description |
| --- | --- |
| Assess | Suggestions that we need to evaluate before accepting them into the backlog. |
| Agreed | Discussed and agreed to accept it, but more work is needed to flesh out details. |
| Accepted | The plugin is suitable for our portal and a story for installing it has been added to the backlog. |
| Implemented | The plugin has been implemented. |
| Rejected | The plugin is unsuitable for the portal and we won't be installing it. |
| Category | Description |
| --- | --- |
| Catalog | The plugin extends the software catalog, e.g. through a card or full page dashboard. |
| Scaffolder | The plugin adds custom actions to the component scaffolder. |

Backstage Setup

The ADP Portal is built on Backstage, an open-source platform for building developer portals. Backstage is a Node application which contains a backend API and a React-based front end. This page outlines the steps you need to follow to set up and run Backstage locally.

🏗️ Setup

Install pre-requisites

Backstage has a few requirements to be able to run. These are detailed in the Backstage documentation; some key requirements are covered below.

🐧 WSL

Backstage requires a UNIX environment. If you're using Linux or a Mac you can skip this section, but if you're on a Windows machine you will need to install WSL.

WSL can either be installed via the command line (follow Microsoft's instructions) or from the Microsoft Store. You will then need to install a Linux distribution. Ubuntu is recommended; either download it from the Microsoft Store or run wsl --install -d Ubuntu-22.04 in your terminal.

Familiarise yourself with:

⚠️ Everything you do with Backstage from this point forwards must be done in your WSL environment. Don't attempt to run Backstage from your Windows environment - it won't work!


Node.js

You will need either Node 16 or 18 to run Backstage. It will not run on Node 20.

The recommended way to use the correct Node version is to use nvm; a short sketch is shown after the note below.


⚠️ If on a PC make sure you install and configure nvm in your WSL environment.
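A minimal sketch of installing nvm and selecting a compatible Node version, assuming the install-script version shown (check the nvm repository for the current release):

# Install nvm (v0.39.7 is an assumption - see https://github.com/nvm-sh/nvm for the latest)
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.7/install.sh | bash

# Install and use Node 18 (Node 16 also works)
nvm install 18
nvm use 18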


You will then need to install Yarn globally. Run npm install --global yarn in your WSL environment.

Git

Make sure you've got Git configured. If on WSL follow the steps to make sure you've got Git configured correctly - your settings from Windows will not carry over.


Ensure you have a GPG key set up so that you can sign your commits. See the guide on verifying Git signatures. If you have already set up a GPG key on Windows this will need to be exported and then imported in to your WSL environment.

To export on Windows using Kleopatra, see here. To import using gpg on WSL, see here.

🔨 Build tools

If installing WSL for the first time you will likely need to install the build-essential package. Run sudo apt install build-essential.

☁️ Azure CLI

Check if you have the Azure CLI installed in your WSL environment by running az --version. If this returns an error you need to install the Azure CLI: curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash. See Install the Azure CLI on Linux.

We have integrated Backstage with Azure AD for authentication. For this to work you will need to sign in to the O365_DEFRADEV tenant via the Azure CLI.
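A sketch of the sign-in, assuming the Azure CLI is installed; the tenant identifier is a placeholder:

# Sign in to the O365_DEFRADEV tenant (replace the placeholder with the tenant ID or domain)
az login --tenant <O365_DEFRADEV-tenant-id-or-domain>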


Project Setup

After installing and configuring the pre-requisites we can clone the adp-portal repo, configure Backstage, and run the application.

⚠️ Remember, if on Windows these steps must be followed in your WSL environment.


📃 Clone the repo

If you haven't already, create a folder in your Home directory where you can clone your repos.

~$ mkdir projects && cd projects

Clone the adp-portal repo into your projects folder.

Set environment variables

Client IDs, secrets, etc. for integrations with 3rd parties are read from environment variables. In the root of the repo there is a file named env.example.sh. Duplicate this file and rename it to env.sh.


A senior developer will be able to provide you with the values for this file.


A private key is also required for the GitHub app. Again, a senior developer will be able to provide you with this key.


ℹ️ Later on down the line we are hoping to move these environment variables to Key Vault


To load the environment variables in to your terminal session run . ./env.sh. Make sure you include both periods - the first ensures that the environment variables are loaded into the correct context.


▶️ Run the application

The application needs to be run from the /app folder - run cd app if you're in the root of the project.

Run the following two commands to install dependencies, and build and run the application:

yarn install
yarn dev

To stop the application, press Ctrl+C twice.

🆘 Troubleshooting

If you have issues starting Backstage, check the output in your terminal. Common errors are below:

"Backend failed to start up Error: Invalid Azure integration config for dev.azure.com: credential at position 1 is not a valid credential" - Have you loaded your environment variables? Run . ./env.sh from the root of the repo, then try running the application again.


"MicrosoftGraphOrgEntityProvider:default refresh failed, AggregateAuthenticationError: ChainedTokenCredential authentication failed" - have you logged in to the Azure CLI? Run az login and make sure you sign in to the O365_DEFRADEV tenant. Try running the application again.


Catalog Data Sources

Overview

Catalog data is pulled in from multiple sources which are configured in the app-config.yaml file. Each individual entity is defined by a YAML file.

Catalog Sources

Components

Backstage regularly scans the DEFRA GitHub organisation for repos containing a catalog-info.yaml file in the root of the master branch. The FFC demo services contain examples of this file (see ffc-demo-web). New components scaffolded through Backstage will contain this file (though it may need further customisation); existing components will need the file added manually.

A catalog-info.yaml file for a component might look like this:

apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
  name: "ffc-demo-web"
  description: |
    Digital service mock to claim public money in the event property subsides into mine shaft.
    This is the web front end for the application. It contains a simple claim submission journey where user input data is cached in Redis.
    On submission the data is pulled from Redis and passed to the message service.
  annotations:
    github.com/project-slug: DEFRA/ffc-demo-web
    dev.azure.com/project: DEFRA-FFC
    dev.azure.com/build-definition: DEFRA.ffc-demo-web
    sonarqube.org/project-key: adp-ffc-demo-web
  tags:
    - node
    - service-bus
    - redis
    - external
    - front-end
spec:
  type: frontend
  lifecycle: beta
  owner: "group:default/fcp-demo"
  system: fcp-demo-service
  dependsOn:
    - "resource:default/fcp-demo-claim-queue"
    - "resource:default/ADPINFSB01"

The Backstage documentation describes the format of this file - it is similar to a Kubernetes object config file. The key properties we need to set are:

  • metadata.name - The name of the component. Must be unique, and should match the repository name.
  • metadata.annotations - Annotations are used by integrations with 3rd party systems. In the example above, github.com/project-slug is used to pull data from the specified project into the Pull Requests dashboard; the dev.azure.com annotations pull pipeline runs into the CI/CD dashboard; sonarqube.org/project-key pulls in SonarCloud metrics for the specified project.
  • spec.type - The type of component. In ADP we currently have two types - frontend (for a web application) and backend (for an API or backend service).
  • spec.lifecycle - The state of the component. In ADP we have aligned the lifecycle with GDS project phases - discovery, alpha, beta, live, retirement.
  • spec.owner - The group/team that owns the component. Groups are defined under shared entities below.
  • spec.system - A reference to the system that the component belongs to. Systems are defined under shared entities below.
  • spec.dependsOn - Dependencies on other components and resources, e.g. if a service publishes to a message queue then a reference to that queue would be defined here.

If a component consumes infrastructure such as a database or service bus queue then that must also be defined alongside the component. Multiple entities can be defined in a single file by using a triple dash --- to separate them.
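A minimal sketch of defining a component and one of its resources in the same file, reusing names from the example above (the Resource type value is illustrative):

apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
  name: ffc-demo-web
spec:
  type: frontend
  lifecycle: beta
  owner: "group:default/fcp-demo"
  system: fcp-demo-service
  dependsOn:
    - "resource:default/fcp-demo-claim-queue"
---
apiVersion: backstage.io/v1alpha1
kind: Resource
metadata:
  name: fcp-demo-claim-queue
spec:
  type: message-queue              # illustrative type
  owner: "group:default/fcp-demo"
  system: fcp-demo-service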


Shared entities


Users


GitHub App Permissions

The minimum permissions that we require for our ADP GitHub App are:
  • For each repository in Defra GitHub, we require read access to metadata
  • For each repository in Defra GitHub, we require read and write access to administration, code, and pull requests

Please note:

  • Any other permission is assumed to be set to "no access"
  • This set of permissions is required for our short to medium term ADP goals and objectives; we will most likely need to expand the scope as time goes on.

Repository Permissions

+

Repository permissions permit access to repositories and related resources.

+

Repository Administration

+

Repository creation, deletion, settings, teams, and collaborators.

+

Why we need this permission

+
  • When scaffolding new templated services within Backstage/ADO, we need to create repos and set default permissions.

Repository Code (Contents)

+

Repository contents, commits, branches, downloads, releases, and merges.

+

Why we need this permission

+
  • When scaffolding new templated services within Backstage/ADO, we need to add code to the repos.
  • Create code, commits, and branches in adp-flux-core to allow us to automate Flux.

Potential Risks

+

Repository Metadata (mandatory)

+

Search repositories, list collaborators and access repository metadata.

+

Why we need this permission

+
  • Mandatory when creating a GitHub application.

Potential Risks

+

Pull requests

+

Pull requests and related comments, assignees, labels, milestones and merges.

+

Why we need this permission

+
  • Within Backstage we need to be able to view pull requests of matching services.
  • Create pull requests in adp-flux-core to allow us to automate Flux.

Potential Risks

+

Organisation permissions

+

Organisation permissions permit access to organisation related resources.

+

None required at this time

+

Account permissions

+

These permissions are granted on an individual user basis as part of the User authorization flow.

+

None required at this time

+

Potential Issues

+

Risky API Permissions

+

GitHub Apps can request almost any permission from the list of API actions supported by GitHub Apps.

+

Possible Remediations:

+
  • Review GitHub App permissions, selecting only permissions that are required in the short to medium term.
  • Test the GitHub App in a sandbox organisation.
  • Understand the context of the Backstage plugins or code implemented.
  • Limit GitHub App permissions to specific repositories that are used by the ADP platform. This would have the side effect of decreasing the effectiveness of the platform for onboarding new services, as it adds a manual step of adding in repos (assumption). We would like to keep the same scope as CDP, which is to have access to all repositories in the Defra Org.

Compromised App credentials

+

Leaking or misplaced GitHub App credentials.

+

Possible Remediation

+
  • Following best practices, ensuring credentials are secure and stored in a key vault in each of our staging and production ADP Portals (documentation).
  • Following best practices, ensuring credentials are secure when developing locally: credentials are kept out of source control in a local env.sh file and are never checked in.
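As a sketch of the local pattern (the variable names below are hypothetical, not the portal's actual configuration keys):

# env.sh - kept out of source control; add it to .gitignore
export GITHUB_APP_ID="123456"                # hypothetical value
export GITHUB_APP_CLIENT_SECRET="<secret>"   # hypothetical value - never commit real credentials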

Raised Concerns

+

As far as I can tell, your Backstage installation will have a client secret to use the app identity to perform administrative functions?

+

Answer: Yes, please see the permission breakdown above with the reasons why we require them.

+

So presumably we could only ever have the one instance of Backstage?

+

“It's not possible to have multiple Backstage GitHub Apps installed in the same GitHub organisation, to be handled by Backstage. We currently don't check through all the registered GitHub Apps to see which ones are installed for a particular repository. We only respect global organisation installs right now.”

+

Answer: We should be able to have multiple instances of Backstage within the same GitHub organisation, though there may be conflicts with certain Backstage plugins. For example, GitHub Discovery searches repositories for a catalog-info.yaml file to allow automatic registering of entities. If Backstage 1 and Backstage 2 both use the default GitHub Discovery provider configuration, they will pick up the same files as each other. To resolve this, it would be as simple as changing the config so that Backstage 1 and 2 look for YAML files with different names or paths. A second option would be to restrict which repositories the GitHub Application has access to.

+

To aid in remediating this concern, we will change the config where we can to add an "adp" prefix. For example, "adp-catalog-info.yaml".
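A hedged sketch of the kind of change involved, based on the shape of Backstage's GitHub discovery provider configuration (the provider key and schedule values are illustrative):

catalog:
  providers:
    github:
      adp:                                      # illustrative provider key
        organization: 'DEFRA'
        catalogPath: '/adp-catalog-info.yaml'   # renamed from the default catalog-info.yaml
        schedule:
          frequency: { minutes: 30 }
          timeout: { minutes: 3 }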

+

My other question is why there’s an installed web hook – I don’t really know what this is doing?

+

We are not using the web hook at the moment but we may look to support GitHub events in future (Documentation).

+

Key References


GovUK Branding for the ADP Portal

+

The ADP portal is built on Backstage, using React and TypeScript on the frontend. This page outlines the steps taken to incorporate the GOV.UK branding into the ADP Portal.

+

Key References:

+ +

Customization of Backstage

+

Backstage allows customization of its themes to a certain extent; for example, the font family, colours, logos and icons can be changed by following the tutorials. All of the theme changes have been made within the App.tsx file.

+

GOV.UK Branding

+

🏗️ Setup

+

In order to install GOV.UK Frontend you need to meet some requirements:

+
  1. Have Node.js installed on your local environment
  2. Install Dart Sass

Once those are successfully installed you can run the following in your terminal within the adp-portal/app/packages/app folder:

+

yarn add govuk-frontend

+

In order to import the GOV.UK styles, two lines of code have been added within the style.module.scss file:

+

$govuk-assets-path: "~govuk-frontend/govuk/assets/"; // this creates a path to the fonts and images of GOV.UK assets.
@import "~govuk-frontend/govuk/all"; // this imports all of the styles which enables the use of colours and typography.

+

🎨 Colour Scheme

+

The colour scheme is applied by exporting the GOV.UK colours as variables within the style.module.scss file into the Backstage themes created. Currently only a few colours are being used; however, more variables can be added within the scss file and imported within other files. To import the scss file with the style variables, this statement is used in the App.tsx file:

+

import styles from 'style-loader!css-loader?{"modules": {"auto": true}}!sass-loader?{"sassOptions": {"quietDeps": true}}!./style.module.scss';

+
+

This import statement enables the scss file to be loaded and processed.

+
+

The style variables were then used within the custom Backstage themes:

+
const lightTheme = createUnifiedTheme({
...
  primary: {
      main: styles.primaryColour,
    },
});

🗛 Typography

+

The font used within the ADP Portal is GDS Transport as the portal will be on the gov.uk domain.

+

To get this working within the style.module.scss file, the fonts were imported by assigning them to two scss variables called govuk-assets-path-font-woff2 and govuk-assets-path-font-woff:

+
$govuk-assets-path-font-woff2: "~govuk-frontend/govuk/assets/fonts/light-94a07e06a1-v2.woff2";
$govuk-assets-path-font-woff: "~govuk-frontend/govuk/assets/fonts/light-f591b13f7d-v2.woff";

As recommended we are serving the assets from the GOV.UK assets folder so that the style stays up to date when there is an update to the GOV.UK frontend.

+
+

Then these variables were passed into the url of the @font-face rule:

+
@font-face {
  font-family: "GDS Transport";
  src:
    url('#{$govuk-assets-path-font-woff2}') format("woff2"),
    url('#{$govuk-assets-path-font-woff}') format("woff")
}

To customize the font of the Backstage theme, the scss was imported (see the colour scheme section) and used within the fontFamily element of the createUnifiedTheme function:

+
const lightTheme = createUnifiedTheme({
    ...
  fontFamily: "'GDS Transport',arial, sans-serif"
});

The Logo of the ADP Portal was changed by updating the two files within the src/components/Roots folder.

+
  • LogoFull.tsx - A larger logo used when the sidebar navigation is opened.
  • LogoIcon.tsx - A smaller logo used when the sidebar navigation is closed.

Both DEFRA logos are imported as png files and saved within the Roots folder.
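A minimal sketch of the shape of one of these components (the png file name is hypothetical):

import React from 'react';
// Hypothetical file name - the actual DEFRA logo png sits alongside these components.
import defraLogoFull from './defra-logo-full.png';

const LogoFull = () => {
  return <img src={defraLogoFull} alt="DEFRA" />;
};

export default LogoFull;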


Pull Request Details

+

What this PR does / why we need it:

+

A brief description of the changes being made. Link to the relevant work items, e.g. Relates to ADO Work Item AB#213700 and builds on #3376 (link to ADO Build ID URL).

+

Special notes for your reviewer

+

Any specific actions or notes on review?

+

Testing

+

Any relevant testing information and pipeline runs.

+

Checklist (please delete before completing or setting auto-complete)

+
  • Story Work items associated (not Tasks)
  • Successful testing run(s) link provided
  • Title pattern should be {work item number}: {title}
  • Description covers all the changes in the PR
  • This PR contains documentation
  • This PR contains tests

How does this PR make you feel:

+

gif


Verify GitHub commit signatures

+

The project's branch policy is configured to necessitate the use of Git signed commits for any merging activities. This policy serves a twofold purpose: firstly, it validates the authenticity of changes and acts as a barrier against unauthorised or malevolent alterations to the codebase.

+

Secondly, it provides assurance of code integrity by demonstrating that changes have remained unaltered throughout transit and subsequent commits. During the evaluation of pull requests or merge requests, the presence of signed commits also offers a reliable means to confirm that the proposed changes have been authored by authorised contributors, thereby reducing the likelihood of unintentionally accepting unauthorised code.

+

To use signed commits, developers must generate a GPG (GNU Privacy Guard) key pair, which includes a private key kept secret and a public key that is shared. Commits are then signed using the private key, and others can verify the commits using the corresponding public key.

+

Screenshot of a list of commits. One commit is marked with a

+

Generate New GPG Key

+

Please refer to the following link. Please make sure the email you enter in step 8 is your GitHub email account: https://docs.github.com/en/authentication/managing-commit-signature-verification/generating-a-new-gpg-key
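In outline, the flow is (a sketch; follow the linked guide for the full steps):

# Generate a new GPG key pair - use your GitHub email address when prompted
gpg --full-generate-key

# Find the long key ID of the key you just created
gpg --list-secret-keys --keyid-format=long

# Export the public key in ASCII armor format, ready to paste into GitHub
gpg --armor --export <KEY_ID>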

+

Adding a GPG Key on GitHub

+
  • In the upper-right corner, click your profile photo, then click Settings.

Screenshot of GitHub's account menu showing options for users to view and edit their profile, content, and settings. The menu item

+
  • In the "Access" section of the sidebar, click SSH and GPG keys.
  • Next to the "GPG keys" header, click New GPG key.
  • In the "Title" field, type a name for your GPG key.
  • In the "Key" field, paste the GPG key you copied when you generated your GPG key.
  • Click Add GPG key.
  • To confirm the action, authenticate to your GitHub account.

Further support and information

+

https://docs.github.com/en/authentication/managing-commit-signature-verification

+

Signing commits using WSL

+
  1. Generate new GPG key
  2. Adding a GPG key to your GitHub account
  3. Telling git about your signing key (see the sketch below)
  4. Still having problems committing? Try this
  5. Signing old commits
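For step 3, the usual commands are as follows (a sketch using a placeholder key ID):

# Tell git which GPG key to sign with (the long key ID from gpg --list-secret-keys)
git config --global user.signingkey <KEY_ID>

# Sign all commits by default
git config --global commit.gpgsign true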

FCP Demo Services

+

This documentation captures the existing design of the demo/exemplar services in the FFC platform. It provides an overview of the components in the demo services and the application flow.

+

The demo service contains 6 containerized microservices orchestrated with Kubernetes. The purpose of these services is to prove the platform's capability to provision the infrastructure required for developing a digital service, along with CI/CD pipelines, with minimal effort. This in turn allows developers to focus on the core business logic.

+

Language of choice

+
  • Node.Js
  • ASP.NET Core

Tools & External Dependencies

+
  • Azure Service Bus for messaging
  • PgSql

Existing Demo Services

+

Below are the demo services that are present at the moment.

| Service               | Dev Platform | Git Repo                                                |
| Payments Service      | Node.Js      | https://github.com/DEFRA/ffc-demo-payment-service       |
| Payments Service Core | Asp.Net Core | https://github.com/DEFRA/ffc-demo-payment-service-core  |
| Payments Web          | Node.Js      | https://github.com/DEFRA/ffc-demo-payment-web           |
| Claim Service         | Node.Js      | https://github.com/DEFRA/ffc-demo-claim-service         |
| Calculation Service   | Node.Js      | https://github.com/DEFRA/ffc-demo-calculation-service   |
| Collector Service     | Node.Js      | https://github.com/DEFRA/ffc-demo-collector             |
| Demo Web              | Node.Js      | https://github.com/DEFRA/ffc-demo-web                   |

Business Context

+

DemoService - Context.png

+

Microservice Architecture

+

image.png
DemoService - Context1.jpg

+

Other Services

+
  • Demo Apply Service - Generated from Claims service. Needs further clarification from dev team.
  • Demo Apply Web - Generated from Demo web app. Needs further clarification from dev team.

Testing

| Test                                                    | Code | Docker Compose | Dev | Test | Pre-production |
| Lint/Audit                                              | X    |                |     |      |                |
| Snyk Test                                               | X    |                |     |      |                |
| Static Code Analysis / SonarCloud                       | X    |                |     |      |                |
| Functional / BDD                                        |      | X              | X   |      |                |
| Integration Tests / Contract testing using Pact Broker  |      |                | X   | X    |                |
| Performance Testing (JMeter)                            |      |                |     |      | X              |
| Pen Testing (OWASP ZAP)                                 |      | X              |     |      | X              |
  • Code
    • Lint/Audit
    • Snyk Test
    • Static Code Analysis/SonarCloud
  • Docker Compose
    • Functional/BDD
    • Pen Testing
  • Dev
    • Functional/BDD
    • Integration Tests/Contract testing using Pact Broker
  • Test
    • Integration Tests/Contract testing using Pact Broker
  • Pre-production
    • Performance testing (JMeter)
    • Pen Testing

Challenges

+
  • Docker Compose is good if the application is fully contained, but it has dependencies, which are unknown at present.
  • Could remove Docker Compose tests and run in SND2 or SND3, then install dependencies and the Helm chart, which updates Flux.

Considerations for discussion

+
  • DAPR - with RabbitMQ for containerized testing
  • Open Policy Agent (OPA)

DAPR

+
  • Distributed Application Runtime (DAPR) simplifies the authoring of distributed, microservice-based applications.
  • Once DAPR is enabled for a container app, a secondary process is created alongside the application that enables communication with DAPR via HTTP or gRPC.

Open Policy Agent

+

Azure Policy extends Gatekeeper v3, an admission controller webhook for Open Policy Agent (OPA), to apply at-scale enforcements and safeguards on your clusters in a centralized, consistent manner. Azure Policy makes it possible to manage and report on the compliance state of your Kubernetes clusters from one place. The add-on enacts the following functions:

+
  • Checks with Azure Policy service for policy assignments to the cluster.
  • Deploys policy definitions into the cluster as constraint template and constraint custom resources.
  • Reports auditing and compliance details back to Azure Policy service.

Azure Policy for Kubernetes supports the following cluster environments:

+
  • Azure Kubernetes Service (AKS)
  • Azure Arc enabled Kubernetes

Further reading

+

https://learn.microsoft.com/en-us/azure/governance/policy/concepts/policy-for-kubernetes
https://learn.microsoft.com/en-us/azure/container-apps/dapr-overview?tabs=bicep1%2Cyaml

+

Service Details

+
+

TODO

+

This page is a work in progress and will be updated in due course.

+

Add details about each service.


Onboarding a delivery programme

+

This getting started guide summarises the steps for onboarding a delivery programme onto ADP via the Portal. It also provides an overview of the automated processes involved.

+

Prerequisites

+

Before onboarding a delivery programme you will first need to ensure that:

+
  • The Arms Length Body (ALB) for your programme has been created within the ADP Portal.
  • You have an active user account within the ADP Portal with admin permissions to create a delivery programme within your selected ALB.
  • You have a unique "Delivery Programme code" or "Service Code" for your Delivery Programme.

Overview

+

By completing the steps in this guide you will be able to:

+
  • Add a new delivery programme to the ADP portal database under the programme's ALB.
  • Add the new delivery programme's catalog files to adp-software-templates.
  • Assign delivery programme managers to administer & maintain the delivery programme.

Guide

+

Creating a Delivery Programme

+

Once you have navigated to the 'ADP Data' page you will be presented with the 'Delivery Programmes' option.
ADP Data
By clicking 'View' you will have the ability to view existing Delivery Programmes and add new ones if you have admin permissions.
View Delivery Programmes

+

Entering Delivery Programme information

+

You can start entering Delivery Programme information by clicking the 'Add Delivery Programme' button.
Add Delivery Programme
You will be presented with various fields; some are optional. For example, the 'Finance Code', 'Website', and 'Alias' are not required, and you can add them later if you wish.

+

If the Arms Length Body (ALB) for your programme has already been created it will appear in the Arms Length Body dropdown and you will be able to select it accordingly. The programme managers' dropdown should also be pre-populated, and you are able to select more than one manager.

+

This form includes validation. Once you have completed inputting the Delivery Programme Information and pressed 'create', the validation will run to check if any changes need to be made to your inputs.

+

Updating Delivery Programme information

+

Once you have created your Delivery Programme, you will automatically be redirected to the view page which will allow you to look through existing programmes and edit them.
Update Delivery Programme


Onboarding a delivery project

+

This getting started guide summarises the steps for onboarding a delivery project onto ADP via the Portal. It also provides an overview of the automated processes involved.

+

Prerequisites

+

Before onboarding a delivery project you will first need to ensure that:

+
  • The delivery programme for your project has been onboarded onto ADP, see the Getting Started guide for Onboarding a delivery programme on to ADP.
  • You have an active user account within the ADP Portal with admin permissions to create a delivery project within your selected delivery programme.
  • You have a "Service Code" and "Cost Centre" for your delivery project.

Overview

+

By completing this guide you will have completed these actions:

+
  • Adding a new delivery project to the ADP portal database under your programme.
  • Adding the new delivery project's catalog files to adp-software-templates.
  • Assigning delivery project admins to administer the delivery project.
  • Creating a new ADO Team on a selected ADO project.
  • Creating a GitHub Team for the delivery project.
  • Adding Azure group(s) for the delivery project's tech users. Members of this group will be given access to common platform resources and the project resource group in the DefraDev and Defra tenants:
    • O365_DefraDev - SND3, data and control plane read/write.
    • Defra - DEV1, TST1/TST2, data and control plane read/write.
    • Defra - PRE1, PRD1, read access on the control plane. No data plane access given.

Guide

+

Creating a Delivery Project

+

Once you have navigated to the 'ADP Data' page you will be presented with the 'Delivery Projects' option.
ADP Data
By clicking 'View' you will have the ability to view existing Delivery Projects and add new ones if you have admin permissions.
View Delivery Projects

+

Entering Delivery Project information

+

You can start entering Delivery Project information by clicking the 'Add Delivery Projects' button.
Add Delivery Projects
You will be presented with various fields; some are optional. For example, the 'Alias', 'Website', 'Finance Code' and 'ADO Project' are not required, and you can add them later if you wish.

+

If the Delivery Programme for your project has already been created it will appear in the Delivery Programme dropdown, and you will be able to select it accordingly.

+

This form includes validation. Once you have completed inputting the Delivery Project Information and pressed 'create', the validation will run to check if any changes need to be made to your inputs.

+

Assigning delivery project admins to administer the delivery project

+

...

+

Creating a new ADO Team on a selected ADO project

+

...

+

Creating a GitHub Team for the delivery project

+

...

+

Adding Azure group(s) for the delivery project's tech users.

+

...

+

Updating Delivery Project information

+

Once you have created your Delivery Project, you will automatically be redirected to the view page which will allow you to look through existing projects and edit them.
Update Delivery Projects


Onboarding a user

+

This getting started guide summarises the steps for onboarding a user onto your delivery project in ADP. It also provides an overview of the automated processes involved.

+

Prerequisites

+

Before onboarding a user on to your delivery project you will first need to ensure that:

+
  • The delivery project has been onboarded via the ADP portal, see the Getting Started guide for Onboarding a delivery project on to ADP.
  • You have an active user account within the ADP Portal with admin permissions to onboard users to your selected delivery project.
  • The user you are onboarding has a valid cloud account (with the domain as: @defra.onmicrosoft.com or @defra.gov.uk). [Need to link to guidance on how to get a cloud account].
  • If the user you are onboarding is a tech user, they must have a valid GitHub handle.
  • The user's GitHub account has been added to DEFRA's SonarCloud organisation.

Overview

+

By completing this guide you will have completed these actions:

+
  • Understanding of tech and non-tech users.
  • Adding the user to the ADP portal database under your delivery team. [Not automated]
  • Adding the user to the Azure AD ADP portal group, allowing basic read access to the ADP portal. [Not automated]
  • Adding the user to the ADO Team, allowing access to the delivery project's ADO project. [Not automated]
  • Adding the user to Defra's VPN group.
  • Adding a tech user to the GitHub Team.
  • Adding a tech user to the Azure group, allowing access to the delivery project's Azure resources.

Guide

+

....

+

Delivery Project Roles

+

...

+

Onboarding a Delivery Project Team Member


How to create a database for a platform service

+

PostgreSQL is the preferred relational database for microservices. This guide describes the process for creating a database for a microservice and configuring the microservice to use it.

+
+

Note

+

The ADP Platform currently supports PostgreSQL as the only option for a relational database.

+
+

How to create a Postgres Database

+

There are two ways to create a Postgres database in ADP.

+
  1. When scaffolding a new Backend service using the ADP Portal, you have the option to specify the name of the database. Refer to the section on Selecting a template.
  2. For an existing service, you can add values to the Infrastructure Helm Chart values. Refer to the Infrastructure section on Database for Flexible Server.
+

Tip

+

An example of how to specify the Helm Chart values is provided in the ffc-demo-claim-service repository, refer to the configuration in the values.yaml.

+
+

How to apply Database migrations

+

The ADP Platform CI and deployment pipelines support database migrations using Liquibase.

+

Create a Liquibase changelog defining the structure of your database available from the root of your microservice repository in changelog/db.changelog.xml.

+

Guidance on creating a Liquibase changelog is outside of the scope of this guide.
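For orientation only, a minimal changelog sketch (the table and column names are hypothetical):

<?xml version="1.0" encoding="UTF-8"?>
<databaseChangeLog
    xmlns="http://www.liquibase.org/xml/ns/dbchangelog"
    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xsi:schemaLocation="http://www.liquibase.org/xml/ns/dbchangelog
        http://www.liquibase.org/xml/ns/dbchangelog/dbchangelog-latest.xsd">
  <!-- One changeSet per migration; Liquibase tracks which have been applied -->
  <changeSet id="1" author="fcp-demo">
    <createTable tableName="claims">  <!-- hypothetical table -->
      <column name="claim_id" type="uuid">
        <constraints primaryKey="true" nullable="false"/>
      </column>
      <column name="created_at" type="timestamp"/>
    </createTable>
  </changeSet>
</databaseChangeLog>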

+

Update Docker Compose files to use Postgres service and environment variables

+

Update docker-compose.yaml, docker-compose.override.yaml, and docker-compose.test.yaml to include a Postgres service and add Postgres environment variables to the microservice.

+

Replace <programme code> and <service> as per naming convention described above.
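As a hedged sketch of the shape involved (the image tag and credentials are illustrative and for local use only; the scaffolded files define the actual defaults):

# docker-compose.yaml (sketch)
services:
  <programme code>-<service>-postgres:
    image: postgres:16                                  # illustrative tag
    environment:
      POSTGRES_DB: <programme code>-<service>
      POSTGRES_USER: postgres                           # illustrative local-only credentials
      POSTGRES_PASSWORD: ppp
  <programme code>-<service>:
    environment:
      POSTGRES_HOST: <programme code>-<service>-postgres
      POSTGRES_DB: <programme code>-<service>
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: ppp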

+

Local Development

+

The following scripts and files are scaffolded as part of your backend service to provide a good local development experience.

+
  • A docker-compose.migrate.yaml in the root of your microservice repository that spins up Postgres in a Docker container.
  • The scripts/ folder contains three bash scripts: start, test and postgres-wait.
  • The scripts/migration/ folder contains two scripts to apply and remove migrations: database-up and database-down.

Execute the start script to start the Postgres container.

+
# snippet of the code in the start script

cd "${projectRoot}"
# Guarantee clean environment
docker-compose down -v
docker-compose -f docker-compose.migrate.yaml down -v
# Ensure container images are up to date
docker-compose -f docker-compose.migrate.yaml run database-up
docker-compose up --build

How to Enable Postgres Extensions

+

Some microservices require Postgres extensions to be installed in the database. Below is the list of the enabled extensions:

+
  • VECTOR
  • UUID-OSSP

This is a two step process.

+

Step 1. Enable extensions on the server

+
+

Tip

+

Request the ADP Platform Team to enable the extension on the Postgres Flexible server if it is not in the list above of enabled extensions.

+
+

Step 2. Enable extensions on the database using a user account that has database admin permissions

+

When scripting the database migrations for creating extensions, use IF NOT EXISTS. This ensures that the scripts can run both locally and in Azure.

+

When running the Postgres database locally in Docker, you will have sufficient permissions to create the extensions. However, in Azure, the ADP Platform will apply the migrations to the database instead of using the microservice's managed identity. If you don't use IF NOT EXISTS, the migrations on the Azure Postgres database will fail due to insufficient permissions.

+

Below is an example of a SQL script you can use in your migration to enable an extension.

+
CREATE EXTENSION IF NOT EXISTS vector;
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";

How to create a platform service

+

In this how to guide you will learn how to create a new Platform service on ADP for your delivery project and team. You will also learn what automated actions will take place, and any areas of support that may be needed.

+

Prerequisites

+

Before creating a Platform business service (microservice), you will first need to ensure that you have:

+
  • Onboarded a delivery project on to ADP.
  • An active user account on the ADP Portal with admin or user access for the delivery project team.
  • An Azure DevOps project for Pipeline management for your project/programme.
  • In your team's Azure DevOps (ADO) project, the ADP Platform Service Account must have the correct permissions for scaffolding your service Pipelines:
    • These permissions are Project, Build, and Endpoint Administrator, plus Read & Manage Environments.
    • The ADP Platform Engineers or CCoE can manage this for you.
+

Note

+

Please contact the ADP Platform Engineering team for support if you don’t have, and cannot setup/configure, these prerequisites.

+
+

Overview

+

By completing this guide, you will have completed these actions:

+
  • Created a GitHub team for the service's delivery project (if not already present) in the Defra GitHub org.
    • Developers are added to your GitHub team.
  • Scaffolded an ‘exemplar’ Platform service (microservice application) in your chosen development language, with any optional infrastructure.
  • Scaffolding of an ADO project if not already done:
    • Shares the service connection and agent pools with the ADO project.
    • Creates the ADO environment and ‘secrets’ variable group in the ADO project.
    • Authorizes the Service Connection between the created ADO Pipeline & GitHub repository.
  • Creation of an ADO CI/CD Pipeline for the Service in the selected ADO project.
  • Starts the running of your Service CI/CD Pipeline: builds and deploys your service into the Sandpit/Development environment.
    • Initialization of SonarCloud project, Snyk scanning, Service Manifests, etc.
  • Registers your Service in the Backstage Catalog (via the catalog-info YAML file).

Areas of support

+

The following areas require the support of the ADP Platform Team for your service initial setup:

+
  • Domain (URL) creation
+

Note

+

The initial domain (Frontend service or external API URL) creation is currently done via the Platform team pipelines. Please contact the Platform team to create this per environment once the service is scaffolded.

+
+

Guide

+

Selecting a template

+
  1. On the ADP portal click ‘Create...’ on the left-hand side menu.
  2. Select the ‘CHOOSE’ button of the template of the service you would like to create.
+

Tip

+

You can choose Node.js for Frontends, and Node.js or C# for Backends and APIs.

+
+

Entering Component Information

+

Enter the properties describing your component/service:

+
  1. Enter Component Name (service name). It must be a unique name for the component, which will be used for the repository, package, and image name. This should be in the format {programme}-{project}-{service}. For example, fcp-grants-web.
  2. Enter Description. Describes what this component does. This description will be used in the component's README and package.json.
  3. Select the System that this component/service will be a part of. Systems are a collection of related components and resources (i.e., your entire service and associations).
  4. Select the Project Phase which suits your service. Refer to the GDS service manual for more information.
  5. Select the Owner (team) who will own this component (i.e., your delivery team).
  6. Optionally: Select the initial infrastructure (Queues/Topics, Database etc.) you want to deploy with your service. More infra can be added/updated later via the YAML file in your repo!
  7. Click the Next button to continue.

Entering Git Repository information

+

To encourage coding in the open the repository will be public by default. Refer to the GDS service manual for more information. You can select a ‘private’ repository by selecting the ‘private repo’ flag in GitHub.

+

The scaffolder will create a new repository and an associated team with ‘Write’ permissions:

+
  1. The host where the repository will be created - the default will be the GitHub organisation of DEFRA.
  2. Enter the name of the repository. It should be the same as the component name (service name).
  3. Enter GitHub Team Name. This team will be granted ‘Write’ access to the repository.
  4. Enter GitHub Team Description. An optional description of the team.
  5. Enter GitHub Team Members, using a comma-separated list of GitHub usernames to be added to the team. For example: GitHubName1,GitHubName2.
  6. Select GitHub Team Visibility. This is the privacy level this team should have. Visible teams can be seen by all members in the organization; Secret teams can only be seen by the organization owners and team members.
  7. Click Next to move to the next page.

Entering CI/CD information

+

CI/CD pipelines will be created in Azure DevOps:

+
  1. Azure DevOps Organization. This will be defaulted to DefraGovUK and is not changeable.
  2. Enter your project's ‘Azure DevOps Project Name’. This is the name of the project you are a member of and wish to scaffold your pipelines into.
  3. Service Connection Name. This will be defaulted and is not changeable.
  4. Enter the Pipeline Folder. The Folder Path is the directory structure for the Pipeline which will be created in your project. For example: ADP/fcp-grants-web. Hint: you can group many pipelines into one Folder structure.
  5. Click Review to move to the next page.

Reviewing entered information

+
  1. Review the information entered and click back if you would like to amend any of the provided information.
  2. If all the entered information is correct, click the create button to begin creation of your new service.

Creating the service

+

Now you have reviewed and confirmed your details, your new Platform service will be created! It will complete the actions detailed in the overview section. Once this process completes, you will be given links to your new GitHub repository, the Portal Catalog location, and your Pipelines. You now have an ADP business service!

+

creation of service in portal

+

Creation of additional infrastructure

+

We use HELM Charts to deploy, manage and update Platform service applications and their dedicated and associated infrastructure. This is ‘self-service’ managed by the platform development teams/tenants. We use Azure Bicep/PowerShell for all other Azure infrastructure and Entra ID configuration, including Platform shared and ‘core’ infrastructure. This is managed by the ADP Platform Engineers team. An Azure Managed Identity (Workload ID) will be automatically created for every service (microservice app) for your usage (i.e. assigning RBAC roles to it).

+

How do I use the HELM Charts for infrastructure with my application?

+

The creation of infrastructure dedicated for your business service/application is done via your microservice HELM Chart in your repository, and deployed by your Service CI/CD pipeline that you created earlier. A ‘helm’ folder will be created in every scaffolded service with 2 subfolders. The one ending with ‘-infra’ is where you define your service’s infrastructure requirements in a simple YAML format.

+
+

Note

+

The full list of supported ‘self-service’ infrastructure can be found in the ADP ASO Helm Library Documentation on GitHub with instructions on how to use it.

+
+

Image below is an example of how-to self-service create additional infrastructure by updating the HELM charts ‘values.yaml’ file with what you require to be deployed:

+

helm chart values file
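Purely as a hedged illustration of the pattern (the keys below are hypothetical and not the library's actual schema - check the ASO Helm Library documentation for the supported values):

# helm/<your-service-name>-infra/values.yaml - hypothetical keys for illustration only
namespaceQueues:
  - name: fcp-demo-claim-queue   # hypothetical Service Bus queue
postgres:
  databases:
    - name: fcp_demo_claims      # hypothetical database name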

+
+

Warning

+

Please contact the ADP Platform Engineers Team if you require any support after reading the provided documentation or if you’re stuck.

+
+ + + + +

What is a system?

+

A system is a label used to group together multiple related services. This label is recognized and used by Backstage to make it clear which services interact with each other. Systems are a concept provided by Backstage out of the box, and are documented by them here

+

How to create a system

+

In order to create a system, you simply need to add a new definition for it to the ADP software templates repository. There is an example system to show the format that should be used. Once this system is added, you need to add a link to it from the all.yaml file. You will also need to choose a name for your system, which should be in the format {delivery-project-id}-{system-name}-system e.g. fcp-demo-example-system.

+

Once the system has been added and the all.yaml file has been updated, you will need to wait for the ADP portal to re-scan the repository which happens every hour. If you need the system to be available sooner than that, then an ADP admin can trigger a refresh at any time by requesting a refresh of the project-systems location.

+

The all.yaml file

+

The all.yaml file is what tells the ADP portal where to find the systems, and so every file containing a definition for a system must be pointed to by this file. To point to a new file, you will need to add a new entry to the targets array which should be the relative path from the all.yaml file to your new system file.

+

all.yaml

apiVersion: backstage.io/v1alpha1
kind: Location
metadata:
  name: project-systems
  description: Systems defined and owned by projects using ADP.
spec:
  targets:
    # There is no need to modify any of the file above this point.
    - ./another-projects-system.yaml
    - ./my-system.yaml # Reference a system by a relative path like this.

+

The {system}.yaml file

+

Your system will actually be defined inside its own .yaml file. The name of this file should be the name of the system you are creating to make it easier to track which system is defined where. The format of this file should follow this example:

+

my-system.yaml

apiVersion: backstage.io/v1alpha1
kind: System
metadata:
  # This is the id of the system you want to create. It should be less than 100 characters long and
  # only contain letters, numbers and hyphens and be in the format `{delivery-project-id}-{system-name}-system`
  name: fcp-demo-example-system
  # The description is meant to give users a bit of information about what sort of services are meant
  # to be linked to this system and what it represents. This can be any text that you like, as long as it is valid yaml.
  description: An example system, intended to be used to help projects to create their own systems.
spec:
  # This needs to be a reference to the project that owns the system you are creating. This will always
  # start with `group:default/`, and the ending bit should be the id of your project. You can find the id
  # of your project in the url when you view it in the ADP portal, it will be the last segment of the URL
  owner: "group:default/fcp-demo"

+

Id of your delivery project


How to deploy a Platform Service

+

In this how to guide you will learn how to build, deploy, and monitor a Platform service (Web App, User Interface, API etc) for your team. It includes information about Pipelines specifically and how the ADP Backstage Portal supports this.

+

Prerequisites

+

Before building and deploying a service, you will first need to ensure that:

+ +

Overview

+

By completing this guide, you will have completed these actions:

+
    +
  • Learned how to build and run a CI pipeline for your service.
  • +
  • Learned how to deploy your platform service to an environment.
  • +
  • Learned how to monitor your platform service, and check the status of it
  • +
  • Understand how ADP uses Azure DevOps Pipelines for builds and deployments.
  • +
  • How to customize your pipeline to your needs and where you can find it.
  • +
  • How the ADP Portal supports you
  • +
+

Guide

+

How do I find my service’s CI/CD pipeline?

+

All pipelines in ADP are created in your project's/programme's Azure DevOps project. This is specific to your team; it's the one you chose when scaffolding your service. We use YAML Azure Pipelines, and Defra GitHub to store all code.
Pipelines are mapped 1-1 per microservice, and can deploy the Web App, Infra, App Configuration and Database schema together as an immutable unit.

+

In your scaffolded repository:

+
  • Once you have scaffolded your project/service, you will have a ‘build.yaml’ in an .azureDevOps folder in your microservice repository in GitHub. This is your pipeline instantiation.

In your chosen Azure DevOps project:

  • On the Pipelines page, either at the root level or under a Pipelines Folder name you defined/chose, you will find your Pipeline.
  • Your pipeline is convention based, including naming. Your pipeline will be called the same name as your repository: <your-service-name>.
    • E.g. <projectcode>-<servicename>-api

Pipeline

+

The above image is an example of a scaffolded Pipeline called ‘adp-demo99’ in the DEMO folder.

+

Can I find this in the ADP Portal?

+

Yes! Simply go to your components page that you scaffolded/created via the ADP Portal, and click on the CI/CD tab, which will give you information on your pipeline, and will link off to the exact location.

+

How do I run my service pipelines?

+

We promote continuous integration (CI) and continuous delivery (CD). Your pipeline will trigger (run the CI build) automatically on any change to the ‘main’ branch, or any feature branch you create and anytime you check-in. This includes PR branches. You simply run your pipeline from the ADO Pipelines interface by clicking ‘Run pipeline’.

+

You can:

+
  • Just run a CI build on check-in on feature branches - the default approach.
  • Run a CI build and an automatic deployment into Sandpit/Dev from a feature branch.
  • Run a CI from the main branch and promote your code from development to production.
  • Customise your CI, PR and build triggers, and deploy configuration only or the full app, in your service's ‘build.yaml’.
  • Run your pipeline manually/on-demand, by selecting the ‘Run pipeline’ blue button in the top-right hand corner of your Azure Pipelines project page.
    • You can select any feature and main branches and customise any flags for deployment.
    • You can run from a Commit ID and from a GitHub Release Tag.
    • You can override variables if required.

Are there any requirements for my CI pipeline to run?

+
  • You must update your App Version using semantic versioning, at least once. This is an update in your ‘package.json’ or your ‘.csproj’ file to the Major, Minor or Patch version (see the example below).
  • You must be building a NodeJs or C# app that is containerised with a HELM Chart.
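For example, a patch bump of the version field in package.json (values illustrative):

{
  "name": "fcp-grants-web",
  "version": "1.0.1"
}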

Pipeline documentation, parameters, and configuration options can be found here.

+

Run Pipeline

+

The above image shows an example pipeline run.

+

How do I customise my CI/CD pipelines?

+

You can change some basic functionality of your pipeline. A lot of it is defined for you in a convention-based manner, including the running of unit tests, reporting, the environments that are available etc., and some of it is selectable, such as the build of .NET or NodeJS apps, the location of test files, PR and CI triggers, and the parameters to deploy configuration only or deploy automatically on every feature build.
Full details can be found on the Pipelines documentation GitHub page.
Pipeline Parameters

+

The above image is an example of what can be changed in terms of Pipeline Parameters (triggers, deployment types, paths to include/exclude).
The below image is an example of what can be changed in the pipeline YAML itself. You can change things like your config locations, test paths, which ADO Secret variable groups you wish to import, which App Framework (Node or C#) to use, etc.
yaml pipeline
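As a rough, hedged sketch of the kind of options involved (the parameter names below are hypothetical - the real schema is defined in the Pipelines documentation):

# .azuredevops/build.yaml - hypothetical parameter names for illustration only
parameters:
  appFrameworkType: nodejs       # hypothetical: 'nodejs' or 'dotnet'
  testFilePath: ./test           # hypothetical: location of test files
  deployConfigOnly: false        # hypothetical: deploy configuration without the app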

+

What does my progression look like through environments?

+

To promote your code through environments, you can use the Azure Pipelines user interface for your team/project to either:

+
  • Allow automated progression via automated checks and continuous delivery, or
  • Manually push/promote the code to environments on demand.

Your environments and any default gates or checks will be automatically plotted for you. This is an example of a full pipeline run. You can select, based on the Platform route-to-live documentation, which environments you promote code to. You don’t need to go to all environments to go live.

+

Pipeline Run Completed

+

This is an example of a waiting ‘stage’ which is an environment:

+

Waiting Stage

+

To promote code, you can select ‘Review’ in the top-right hand corner and click approve.

+

Full Azure Pipelines documentation can be found here.

+

What ways can I monitor my pipelines and service, and the steps that are run?

+

Every pipeline run includes steps such as unit tests, integration tests, acceptance tests, app builds, code linting, static code analysis including Sonar Cloud, OWASP checks, performance testing capability, container/app scanning with Snyk etc.

+

We report out metrics in Azure DevOps Pipelines user interface for your project and service, for things like Unit Test coverage, test passes and failures, and any step failures. Full details are covered in Pipelines documentation. Details can also be found in your projects Snyk or Sonar Cloud report.

+

From the ADP Backstage Portal, you can find the following information for all environments:

+
  • Deployments and HELM release status, including app versions and which environments.
  • Services deployed into AKS (Kubernetes) and their health status and stats.
  • Any health status codes, errors, and issues.
  • Any log outputs for running services, including any errors.
  • Last reconciliation (deployment) date and time via FluxCD, and any successes or failures.
  • Debug and log information.
  • Overview of Pipeline runs in ADO.
  • Pull Requests open/closed etc.
  • Access to Grafana and Dashboards, to monitor and maintain your service:
    • Includes Prometheus logs, status, errors, consumption etc.
  • Any service documentation for your app.
  • Any dependencies for your apps, and how they link to other projects, programmes, and services.
  • Any API specifications (Open API) and associated components.
  • Key links for your service - GitHub, ADO, Snyk, SonarCloud, Azure Portal, Dashboards, your Service's frontend URL etc.

The portal is fully self-service, and each deployed component details the above. You should use the ADP Portal to monitor, manage and view data about your service if it isn't included in your Pipeline run.


How to create an acceptance test

+ +

How to acceptance test

+

In this how to guide you will learn how to create, deploy, and run an acceptance test for a Platform service (Frontend Web App or an API) for your team.

+

Prerequisites

+

Before adding acceptance tests for your service, you will need to ensure that:

+ +

Overview

+

By completing this guide, you will have completed these actions:

+
  • Learned how to add an acceptance test for your service.
  • Learned how to run an acceptance test locally.
  • How to customize your pipeline to run acceptance tests based on tags for different environments.

Guide

+

These tests may include unit, integration, acceptance, performance, accessibility etc., as long as they are defined for the service.

+
+

Note

+
+

The pipeline will check for the existence of the file test\acceptance\docker-compose.yaml to determine if acceptance tests have been defined.

+
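For illustration only, a minimal sketch of what that file might contain - the wdio-cucumber service name matches the run command used later in this guide, but the build context and settings are assumptions, not the platform template:

version: '3.8'
services:
  wdio-cucumber:
    build: .                      # image containing the WebdriverIO + Cucumber test runner
    environment:
      TEST_TAGS: ${TEST_TAGS}     # cucumber tag expression; empty runs all tests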

How to add an acceptance test for your service?

+

You may add tags to features and scenarios. There are no restrictions on the name of the tag. Recommended tags include the following: @sanity, @smoke, @regression.

+

If custom tags are defined, then the pipeline should be customized to run those tests as detailed in the following sections.

+

How to run an acceptance test locally?

+

Set the required tags. The default is an empty string, which will run all tests:

+
  • pwsh: $ENV:TEST_TAGS = "@sanity or @smoke"
  • shell: export TEST_TAGS="@sanity or @smoke"

Run the acceptance test script under the scripts folder within the repo:

+
docker-compose up -d
cd test/acceptance
docker-compose run --rm wdio-cucumber

How to customize your pipeline to run acceptance tests?

+
+

Note

+

Every pipeline run includes steps to run various post deployment tests. These tests may include unit, integration, acceptance, performance, accessibility etc., as long as they are defined for the service.

+
+

You can customize the tags and environments where you would like to run specific features or scenarios of your acceptance tests:

+
postDeployTest:
  testEnvs:
    performanceTests: snd4, pre1
    accessibilityTests: snd4, tst1
    acceptanceTests:
      - env: snd4
        tags: '@demotag'
      - env: dev1
        tags: '@sanity or @smoke'
  envToTest: snd4,dev1,tst1,pre1

If not defined, the pipeline will run with the following default settings.

+
postDeployTest:
  testEnvs:
    performanceTests: snd4, pre1
    accessibilityTests: snd4, dev1, tst1
    acceptanceTests:
      - env: snd4
        tags: '@sanity or @smoke'
      - env: dev1
        tags: '@smoke'
      - env: tst1
        tags: '@smoke or @regression'
  envToTest: snd4,dev1,tst1,pre1

Please refer to the ffc-demo-web pipeline.

+

Test execution reports will be available via Azure DevOps Pipelines user interface for your project and service.


How to create a performance test

+ +

How to performance test

+

In this how to guide you will learn how to create, deploy, and run a performance test for a Platform service (Web App, User Interface etc) for your team.

+

Prerequisites

+

Before adding performance tests for your service, you will need to ensure that:

+ +

Overview

+

By completing this guide, you will have completed these actions:

+
  • Learned how to add a performance test for your service.
  • Learned how to run a performance test locally.
  • Learned how to customize your pipeline to run performance tests for different environments.

Guide

+
+

Note

+

Every pipeline run includes steps to run various tests pre deployment and post deployment. These tests may include unit, integration, acceptance, performance, accessibility etc., as long as they are defined for the service.

+

The pipeline will check for the existence of the file test\performance\docker-compose.jmeter.yaml to determine if performance tests have been defined.

+
+

How to add a performance test for your service?

+

The performance test scripts should be added to the test\performance folder in the GitHub repository of the service. Refer to the ffc-demo-web example. This folder should contain a docker-compose.jmeter.yaml file that is used to build up the Docker containers required to execute the tests. As a minimum, this will create a JMeter container and optionally create Selenium Grid containers. Using BrowserStack is preferred to running the tests using Selenium Grid hosted in Docker containers because you get better performance and scalability as the test load increases.
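For illustration only, a sketch of the shape such a file might take - the jmeter-test service name matches the run command used later in this guide, while the image, volume paths and file names are assumptions, not the platform template:

version: '3.8'
services:
  jmeter-test:
    image: justb4/jmeter:5.5     # assumed publicly available JMeter image
    volumes:
      - ./:/test                 # mount the test plan and perf-test.properties into the container
    # -n runs non-GUI, -t names the (hypothetical) test plan, -q loads extra properties, -l logs results
    command: ["-n", "-t", "/test/perf-test.jmx", "-q", "/test/perf-test.properties", "-l", "/test/results.jtl"]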

+

Requirements for local development

+
  • Docker Desktop 2.2.0.3 (42716) or higher
  • JMeter v5.5 or above

How to run a performance test locally?

+

Execute the following commands in bash or PowerShell:

+
cd test/performance

# this will execute the docker-compose at the root folder to create an instance of the service and its dependencies
# and then it will create the performance testing containers (JMeter and any other containers specified in docker-compose.jmeter.yaml)
docker-compose -f ../../docker-compose.yaml -f docker-compose.jmeter.yaml run jmeter-test

How to parameterise your tests

+

You can modify the number of virtual users, loop count and ramp-up duration by changing the settings in the file perf-test.properties.

+
# Sample user.properties file
#---------------------------------------------------------------------------
# Properties added to manage noThreads rampUp lCount values
#---------------------------------------------------------------------------
noThreads=15
rampUp=1
lCount=2

You can then reference these variables in your JMeter Script.

+

Set default values for JMeter variables

+

Example of referencing the variables in your JMeter Script
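As a minimal illustration (standard JMeter property syntax, not anything platform-specific): a Thread Group's "Number of Threads" field can be set to ${__P(noThreads,15)}, which reads the noThreads property and falls back to 15 when it is not supplied; rampUp and lCount can be referenced the same way, so the values in perf-test.properties drive the test at run time.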

+

How to customize your pipeline to run performance tests?

+

You can customize the environments where you would like to run specific features or scenarios of your performance tests:

+
postDeployTest:
  testEnvs:
    performanceTests: pre1
  envToTest: snd4,dev1,tst1,pre1

If not defined, the pipeline will run with the following default settings:

+
postDeployTest:
  testEnvs:
    performanceTests: snd4, pre1
  envToTest: snd4,dev1,tst1,pre1

Please refer to the ffc-demo-web pipeline.


Project Migration Process

+

Details on migrating your existing project and its services to ADP.

+

Project Migration Stages

+

Project Migration Stages

+

Project Migration Timeline

+

Project Migration Timeline

+

Pre-migration

+
  • Please fill in the migration questionnaire. The template is in the internal ADP documentation GitHub repository [Link].
  • Once complete, you will agree with the ADP Platform Team & CCoE when to be migrated and onboarded on to ADP.

Infrastructure Migration

+

ADP Portal Setup:

+
  1. Onboarding of delivery programme if not present already.
  2. Onboarding of delivery project.
  3. Onboarding of your team members.

For each of your platform services you now need to migrate them over to ADP and create the needed infrastructure to support them. Link to guide.

+

Once all services/infrastructure are created and verified in SND3 (O365_DefraDev), we will begin the process of pushing the services/infrastructure to the environments in the DEFRA tenant: DEV1, TST1/2, and PRE1. Once deployment is complete and tested in the lower environments, we will be able to progress to PRD1, ensuring that the DEFRA release management process is adhered to.

+

Developer Migration

+

Developers of the delivery project will be actively learning and using the platform to develop new features.

+

As a new developer we recommend starting at "Why ADP" to understand the platform's benefits and challenges.

+

Data Migration

+

This takes place near the completion of the migration, before the service goes live on ADP. Data from the old production environment will need to be moved into the data services within ADP that were created as part of the infrastructure migration stage. For this stage to go smoothly, traffic to the old production environment will need to be stopped, halting the flow of data into the old data services and allowing the data of the old services to be transferred into the selected ADP data services.

+

Depending on the selected data service, different methods will be required to transfer data between production environments; these are detailed in "migrate-production-data".

+

Switch Over

+

...

+

Business As Usual (BAU)

+

Once the migration is complete, continue with business as usual.


Migrate a service to ADP

+

In this guide you will learn how to migrate your existing service to ADP.

+

Prerequisites

+
  • Onboarded delivery project on to ADP.
  • GitHub repository in the Defra Org of your existing service.
  • GitHub repository added to GitHub Apps.
  • Azure DevOps with ADP service connection added.
  • Team/Delivery Project created in the ADP portal.
  • Access to Azure environments, including in the O365_DefraDev Tenant (SND3) & Defra Tenant (DEV1+).

Migrate Production Data

+

In this guide you will learn how to migrate your existing production data to ADP.


ADP Copilot

+

Overview of the ADP Copilot, a tool that provides a conversational interface to the Azure Development Platform (ADP). This page outlines the features and capabilities of the ADP Copilot, such as the ability to interact with the ADP Portal, Azure DevOps, and GitHub using natural language. It describes how the ADP Copilot can be used to create, manage, and monitor resources in Azure, Azure DevOps, and GitHub, and how it can be used to automate tasks and workflows. The ADP Copilot is designed to streamline the development process and improve collaboration between team members by providing a unified interface for interacting with the ADP Platform.

+

Key Features

+

The ADP Copilot provides the following key features:

+
  • Conversational Interface: Allows users to interact with the ADP Platform using natural language.
  • Integration with ADP Documentation: Provides access to the ADP Documentation to view and search for information.
  • Integration with ADP Portal: Provides access to the ADP Portal to view and manage resources.

Architecture

+

ADP Copilot Architecture

+

The ADP Copilot is built using the following components:

+

ADP Documentation - Azure Pipeline

+
  • Azure Pipeline for building and deploying the ADP Documentation:
    • Triggered on commit to the main branch of the ADP External & Internal Documentation.
    • Builds and deploys the documentation to Azure Blob Storage.
    • Runs a Python script to update the ADP Documentation search index, storing each document as indexed/vectorized chunks with the document's front matter as metadata.

Example of the metadata stored in the front matter of the documentation:

+
---
title: ADP Copilot
summary: Overview Architecture of ADP Copilot
uri: https://defra.github.io/adp-documentation/Platform-Architecture/adp-portal/adp-copilot/
authors:
    - Logan Talbot
date: 2024-04-22
---
+

Info

+

All of these front matter fields are required for the documentation to be indexed correctly.

+
+

ADP Portal API - AI Orchestrator

+
  • ADP Portal API - the main AI orchestrator, called via API endpoints by the ADP Portal:
    • A .NET Core Web API that uses Semantic Kernel to process the natural language queries made by the user and orchestrates the responses from the various services involved.
    • Semantic Kernel will use OpenAI GPT-4 to process the natural language queries made by the user.

ADP Portal - Copilot

+
  • The ADP Portal will integrate a Chat Copilot into the UI, allowing a user to interact with the ADP Platform using natural language:
    • The Chat Copilot will call the ADP Portal API to process the natural language queries made by the user.

Azure OpenAI - Models

+

The UK South Azure OpenAI API is used to process the natural language queries made by the user. This restricts which models can be used and the amount of data that can be processed.

+
  • OpenAI GPT-4-turbo: Used to process the natural language queries made by the user. ADP will also experiment with other models like GPT-3.5-turbo.
  • OpenAI text-embedding-ada-002: Used to vectorize and index the ADP Documentation to provide search capabilities. The preferred model would be text-embedding-3-large due to its capabilities, but it is not available in any UK region.

Azure AI Search - Search Index

+

The Azure AI Search index is used to store the vectorized and indexed ADP Documentation. There is currently only one index, and it requires no indexer to populate it because a script in the Azure Pipeline updates the index:

+

Index fields:

| Field Name     | Type             | Retrievable | Filterable | Sortable | Facetable | Searchable | Description                                   |
|----------------|------------------|-------------|------------|----------|-----------|------------|-----------------------------------------------|
| id             | String           | Yes         | Yes        | No       | No        | No         | Unique identifier of the document             |
| content        | String           | Yes         | No         | No       | No        | Yes        | Content of the document                       |
| content_vector | SingleCollection | Yes         | No         | No       | No        | Yes        | Vector representation of the document content |
| title          | String           | Yes         | No         | No       | No        | Yes        | Title of the document                         |
| source         | String           | Yes         | Yes        | No       | No        | No         | Source of the document                        |
| uri            | String           | Yes         | Yes        | No       | No        | No         | URI of the document                           |
| last_update    | DateTimeOffset   | Yes         | Yes        | No       | No        | No         | Last update timestamp of the document         |
| summary        | String           | Yes         | No         | No       | No        | No         | Summary of the document                       |
| repository     | String           | Yes         | Yes        | No       | No        | No         | GitHub repository of the document             |
| metadata       | String           | Yes         | No         | No       | No        | Yes        | Full metadata of a document                   |

Azure Cosmos DB - Chat History

+

Azure Cosmos DB is used to store the chat history of the user's interactions with the ADP Copilot. This provides a history of interactions for an ADP user, helps improve the AI orchestration, and supports the auditing requirements of the ADP Copilot.

+

Example:

+

+
+

Development Stages

+

The ADP Copilot is currently in the development stage and is being built in the following stages:

+
+

Note

+

We are using the Intelligent Application Lifecycle to develop the ADP Copilot.

+
+

Explore: Proof of Concept

+
  • Integrate with the ADP Documentation (external only) with manual local indexing to populate the Azure AI Search index:
    • Add front matter to all markdown files in the ADP Documentation (internal).
    • Create a script to index the ADP Documentation, including adding metadata & chunking the markdown files by headers.
  • Basic conversational interface integrated with the ADP Portal.
  • Basic Q&A AI Orchestrator using OpenAI GPT-4 & Azure AI Search in the ADP Portal API.
  • Saving of chat history to Azure Cosmos DB.
  • Increasing context of user interactions with the ability to include chat history in the AI Orchestrator.

Build & Augment

+
  • Adding to the ADP Documentation Pipeline to update the search index with metadata on commit.
  • Creation of infrastructure for the ADP Copilot, including:
    • Azure AI Search
    • Azure Cosmos DB with Database & Containers
    • Azure OpenAI API with model deployments (GPT-4-turbo & text-embedding-ada-002)
  • Integrate external ADP Documentation with the Azure AI Search index.
  • TBC

Improve & Optimise

+

TBC


ADP Portal Testing

+

The table below details the roles and permissions that can be used for manual testing of the ADP portal and specific actions.

| Name         | Role                   | Permissions                                                        |
|--------------|------------------------|--------------------------------------------------------------------|
| adptestuser1 | Portal User            | No permissions but has portal access (added to portal users group) |
| adptestuser2 | Team Member            | Non-tech team member of the Delivery Project team                  |
| adptestuser3 | Admin Tech Team Member | Admin tech team member of the Delivery Project team                |
| adptestuser4 | Programme Admin        | Delivery Programme Admin of a programme                            |
| adptestuser5 | Admin and Tech Member  | Admin for Project A and also tech member for Project B             |
+

TODO

+

This page is a work in progress and will be updated in due course.

+
+

Application Hosting

+

This article details the Service Application and Hosting Architecture for the solution at a high level. It will detail some of the initial decisions made and reasonings.

+

This ADR is to record App Hosting services.

+

Context -

+

Application Hosting is a key part of building and delivering scalable and secure microservices for business services.

+

TL;DR-

+

ADP will build upon multiple containerized hosting options within Azure. Primarily, this will focus on Azure Kubernetes Service (AKS) to orchestrate, scale and run business services in a transparent manner. AKS has been chosen as the primary toolchain because of its scalability, security and orchestration capabilities. Secondary options that are being continually evaluated and tested include Azure Container Apps and Azure Functions (for triggers or schedules).

+

All applications that run on the platform must be containerized, and the default choice is AKS.

+

Requirements-

+
  • All services must be containerized, and must support any containerized application
  • Hosting options should be secure and scalable without degradation
  • Must provide good orchestration & management of services
  • Must promote CI/CD best practices and should support GitOps
  • Should make best use of Cloud provider functionality and integrate well with Cloud Provider concepts and principles
  • Meet Azure Well Architected Standards
  • Support 100's of services without IP address constraints
  • Should be built upon Open-Source technologies

Decision -

+

Primary:

+
  • Azure Kubernetes Services (AKS)

Secondary:

+
  • Azure Container Apps (ACA)
  • Azure Functions

Azure Key Vault supports public CA integration with DigiCert. This is a fully Microsoft-managed CSR submission and approval process, and it will be used for "back end" certificates in all environments. https://dev.azure.com/defragovuk/DEFRA-DEVOPS-COMMON/_git/Defra.Certificate.Renewals

+

Approval-

+

Platform Architecture

+
  • Dan R, Mike B, Ken B.

ms-arch.png


Azure Service Operator for AKS

+

What is Azure Service Operator?

+

Azure Service Operator (ASO) allows you to deploy and maintain a wide variety of Azure Resources using the Kubernetes tooling you already know and use, i.e. HELM, YAML configuration files and FluxCD or Kubectl.

+

Instead of deploying and managing your Azure resources separately from your Kubernetes application, ASO allows you to manage them together, automatically configuring your application as needed. For example, ASO can set up your Redis Cache or PostgreSQL database server and then configure your Kubernetes application to use them.
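As a flavour of what this looks like in practice, here is a minimal sketch using ASO v2's ResourceGroup CRD - the name, namespace and location are placeholders, not real ADP resources:

apiVersion: resources.azure.com/v1api20200601
kind: ResourceGroup
metadata:
  name: example-team-rg       # placeholder resource group name
  namespace: example-team     # placeholder tenant namespace
spec:
  location: uksouth

Once applied, the ASO controller reconciles this manifest against Azure in much the same way Flux reconciles application workloads.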

+

Why use Azure Service Operator v2?

+
  • K8s Native: we provide CRDs and Golang API structures to deploy and manage Azure resources through AKS.
  • Azure Native: our CRDs understand Azure resource lifecycle and model it using K8s garbage collection via ownership references.
  • Cloud Scale: we generate K8s CRDs from Azure Resource Manager schemas to move as fast as Azure.
  • Async Reconciliation: we don’t block on resource creation, and we can use things like FluxCD to manage infrastructure in a GitOps approach.
  • Developer Self-Service: it allows you, a developer, to manage and deploy your Azure infrastructure alongside your application, and be directly in control of your dependencies.
  • Developer-centric tooling: our development community on AKS focuses on building applications hosted on AKS, and they can use their native toolsets, such as HELM Charts and YAML configs, to do it - without learning any other language, i.e. Bicep, ARM or Terraform.

What is the Platform's goal here?

+

To enable as much developer self-service as possible for 80% of common developer scenarios when building business services.

+

Can I have an example diagram? Sure! Here is one from Microsoft; the article is here...

+

Using Azure Service Operator to provision and manage Azure Database for  MySQL - Flexible Server from within Kubernetes

+

What is our alternative approach?

+

Currently, we have an approach where developers use HELM Charts and FluxCD to securely self-service deploy their applications to AKS in a GitOps manner. These are developer-driven CI and CD processes without Platform team involvement for the majority of the work. Common HELM libraries are there to support you as well. With the addition of ASO, this expands to the Azure infrastructure (storage, queues, identities) outside of AKS that supports your applications.

+

If ASO is not appropriate for the scenario or the component isn't supported, we can use our platform-native fallback: Azure Pipelines with Azure Bicep templates and/or PowerShell and CLI scripts. It's important to remember that Bicep and our supporting scripts are our bedrock, defined in a way that allows a scalable approach to managing a service team's dedicated infrastructure. But it requires deeper context of Azure, the configuration, and a good understanding of Bicep or PowerShell.

+

With this dual-track approach, we can scale and support a wide variety of teams, and we have fallback options and Azure-native tooling to ensure we can continue to deliver applications to production following assured processes.

+

Any other tools exist to compete with ASO?

+
  1. CrossPlane
  2. Score

What is supported for the Platforms ASO?

| Azure Component                    | ASO Support? | MVP? | Self-Service? | Manage base component? | Description & considerations |
|------------------------------------|--------------|------|---------------|------------------------|------------------------------|
| Resource Groups                    | Y | Y | Y | Y | RG write on Sub only. |
| PostgreSQL Database                | Y | Y | Y | N | Database write/read on Postgres only. Workload IDs assigned to specific per-service DBs. |
| Managed Identities                 | Y | Y | Y | Y | Can create and assign RBAC |
| Storage Accounts                   | Y | Y | Y | Y | Can create & manage, with templated support for networking & config |
| Service Bus (Queues/Topics)        | Y | Y | Y | N | Can only create/read: Queues, Topics, Subscriptions, and RBAC on specific entities. |
| Authorization (RBAC on components) | Y | Y | Y | N | RBAC on designated components within Subscription. |
| Azure Front Door                   | Y | N | Y | N | TBD: allow self-creation of DNS entries, domains and routes to designated Cluster. |

What about other, unlisted resources?

+

It is safe to assume that if it's not listed, it's not ASO supported and will be directly managed via Bicep templates, modules and PWSH scripts.

+

Further to this, you will not be able to fully manage the lifecycle of some resources, i.e. Service Bus or the PostgreSQL Flexible Server. This is by design as it's a Platform responsibility.

+

How far will you roll ASO out?

+

We simply don't know at this stage. It is in trial mode and our approach may differ as we expand, learn more and grow as a Platform.

+

Azure Service Operator Setup

+

We have set up ASO in Single Operator mode with a multi-tenant configuration, enabling the use of separate credentials for managing resources in different Kubernetes namespaces.

+

Credential type

+

Azure Service Operator supports four different styles of authentication today.

+

Azure-Workload-Identity authentication (OIDC + Managed Identity) is being used by ASO in ADP.

+

Credential scope

+

Each supported credential type can be specified at one of three supported scopes:

+

  • Global - The credential applies to all resources managed by ASO.
  • Namespace - The credential applies to all resources managed by ASO in that namespace.
  • Resource - The credential applies to only the specific resource it is referenced on.

When presented with multiple credential choices, the operator chooses the most specific one: resource scope takes precedence over namespace scope, which takes precedence over global scope.

+

ASO in ADP uses namespace-scoped credentials. Each project team will have an ASO secret in their own namespace linked to a Managed Identity which will only have access to the Azure resources the team should be allowed to access.
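For illustration, a sketch of such a namespace-scoped credential - ASO looks for a secret named aso-credential in the resource's namespace; the values here are placeholders, and under workload identity the client ID would be that of the team's Managed Identity:

apiVersion: v1
kind: Secret
metadata:
  name: aso-credential             # the name ASO expects for namespace-scoped credentials
  namespace: example-team          # placeholder tenant namespace
stringData:
  AZURE_SUBSCRIPTION_ID: "<subscription-id>"
  AZURE_TENANT_ID: "<tenant-id>"
  AZURE_CLIENT_ID: "<team-managed-identity-client-id>"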

+

The Platform Team will have their own ASO credential scoped at the Subscription level with Contributor and UAA access. This will allow the Platform Team to create Project Team resources using ASO.

+

image.png

+

The Platform Team will create the following resources using ASO to onboard a Project Team:

+
  • userassigned-identity
  • federated-credential
  • resource-group
  • role-assignments-rg
  • role-assignments-sb
  • role-assignments-pgdb

TODO

+

We still need to work out how to automatically inject certain values into the ASO Kubernetes secrets managed by Flux. These are currently being added manually as a post-deployment step.

+

The values we need to pass in for the Platform Team secret are:

+
  • TenantID
  • SubscriptionID
  • ClientID

The values we need to pass in for the Project Team secret and Managed Identity Federated credential are:

+
  • TEAM_MI_CLIENT_ID
  • CLUSTER_OIDC

The below story has been raised to capture the above requirements: https://dev.azure.com/defragovuk/DEFRA-FFC/_workitems/edit/248355

+

We have also created a configmap which is manually installed on AKS in SND1. We did this so these variables are not visible in our public repo. This will also need to be automated.

+
apiVersion: v1
kind: ConfigMap
metadata:
  name: platform-vars
  namespace: config-cluster-flux
data:
  ENVIRONMENT: "snd"
  ACR_NAME: "sndadpinfcr1401"
  NAMESPACE: "config-cluster-flux"
  SERVICEBUS_RG: "sndadpinfrg1401"
  SERVICEBUS_NS: "sndadpinfsb1401"
  POSTGRES_SERVER_RG: "sndadpdbsrg1401"
  POSTGRES_SERVER: "sndadpdbsps1401"
  INFRA_RG: "sndadpinfrg1401"
  APPCONFIG_SERVICE: "sndadpinfac1401"
  TEAM_MI_PREFIX: "sndadpinfmid1401"
  CLUSTER_OIDC: "https://uksouth.oic.prod-aks.azure.com/6f504113-6b64-43f2-ade9-242e05780007/e1f44edd-ac8e-4e4d-928a-2d2a7a52d4b7/"
  TENANT_ID: "6f504113-6b64-43f2-ade9-242e05780007"
  SUBSCRIPTION_ID: "55f3b8c6-6800-41c7-a40d-2adb5e4e1bd1"
  SUBSCRIPTION_NAME: "azd-adp-snd1"
  CLUSTER: "01"

Common Pipelines for delivery

+

We are an Internal Development Platform that supports the delivery and maintenance of business services and applications for a variety of consumers. As part of this, we have a range of Common Pipelines that both Platform and Delivery teams can use to build and deploy their applications to all environments.

+

Key Principles

+

As a business service using the Azure Developer Platform (ADP), you are defined as a Platform Tenant. That means your 'service' or 'product' is deployed onto the ADP and follows certain standards and conventions to expedite delivery.

+
  • All Pipelines are in Azure DevOps, written typically in YAML
  • We support Service and Infrastructure deployments across all environments
  • We use Bicep Modules to manage Infrastructure-as-Code
  • All general configuration is in GitHub
  • We can support pre-built packages from an external CI process, but we also have our own

Infrastructure deployments

+

Infrastructure is layered into the following levels:

+
  1. Product (Application) - This represents an application (business service/product) and its associated infrastructure.
  2. Core (Platform) - This represents the Core Platform components, and fully shared infrastructure.
  3. Bootstrap (Platform) - The minimum we need to deliver a baseline setup.

Each level builds upon the others: Bootstrap comes before Core, and Core before the Products are delivered. Finally, the Service level is the smallest deployment on the Platform and focuses on HELM (FluxCD) deployments and any DB schema upgrades if required. We like to call these our continuous-delivery pipelines.

+

In each environment, there will be exactly one set of all platform-level (Core & Bootstrap) infrastructure, and exactly one set of each of the product-level infrastructure configurations is deployed. Finally, the Service level is added as the most granular. Taken together, these infrastructure levels fully constitute an application/business service.

+

Application/Service Deployments

+
  1. Service (Microservice)

The Services deployment focuses on the HELM chart deployments, using FluxCD. This is a GitOps approach to application deployments and may contain database schema upgrades where relevant. The design can be found here. We also use this pipeline to deploy ADP Shared Services onto the AKS Cluster.

+

The layered delivery - What's deployed by what?

+

The following diagram shows the deployment layers, and the types of infrastructure that might be found in a given environment including the services.
pipelines.png

| Pipeline  | Description | Supported Components |
|-----------|-------------|----------------------|
| Bootstrap | Contains pipelines used for bootstrapping e.g. setting up service connections | VNET, Service Connections, Service Principals |
| Core      | Contains pipelines used to install the ADP Platform Core resources e.g. AKS. These are all shared Platform components used by platform tenants. | AKS Cluster, Networking (VNet, NSGs, Subnets, UDRs), Key Vaults & config, Service Bus Core, Azure Front Door, Platform Storage Accounts, PostgreSQL Flexible Server, Cosmos DB, Azure Redis Cache, App Insights, Log Analytics, Container Registry, AAD Configuration (App Registrations, AAD Groups etc.), App Configuration Service Core, Managed Identities |
| Product   | Contains pipelines used for onboarding & deploying services onto the ADP Platform (i.e. their infrastructure components) | Service Bus - Queues, Topics & Subscriptions, Managed Identities & RBAC, Service Databases, Front Door DNS Profiles (public and private), App Configuration Key-Value Pairs, KeyVault Key-Value pairs, App Registrations & AAD Groups, Service Storage Accounts (& tables/containers, etc.) |
| Services  | Contains Service pipelines for deploying into the AKS Cluster with FluxCD (GitOps Pipelines) | Service & ADP HELM (Application) Deployments with FluxCD, Database Migrations (Liquibase?) |

CI Process

+
  • TBC

Application Deployments

+

Design Principles

  • Application repositories will contain the application source code, docker files and helm charts to deploy the application:

Application-repo
├─ Dockerfile
├─ src/
│  └─ ....
└─ manifests/
   └─ helm/
      ├─ Chart.yaml
      ├─ values.yaml
      └─ templates/
         └─ ....

  • All application deployments are managed with HelmRelease (a minimal sketch follows this list).
  • All GitOps environments should use the main branch. Approaches such as a branch per environment have downsides and should be avoided.
  • Each environment will have an environment-specific Azure Container Registry that the CI pipeline will push the docker images and artifacts (helm charts etc.) to.
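For illustration, a minimal HelmRelease sketch (Flux's helm.toolkit.fluxcd.io CRD) - the service, team and chart source names are placeholders, not a real ADP service:

apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: example-service          # placeholder service name
  namespace: example-team        # placeholder tenant namespace
spec:
  interval: 5m                   # how often Flux reconciles the release
  chart:
    spec:
      chart: example-service
      version: '1.0.0'
      sourceRef:
        kind: HelmRepository
        name: example-acr        # placeholder chart source in the environment's ACR
  values:
    image:
      tag: '1.0.0'               # bumping this in Git rolls the deployment forward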

+

Promoting Changes across Environments

+

CI/CD pipelines should support both continuous deployment and continuous delivery.


Flux Configuration

+

Flux Build Deployment Completion Trigger

+

Context: These are the findings from the 'Build Deployment Completion Trigger' spike. The goal of the findings is to be able to execute post-deployment tests when Flux CD has completed deploying a new application.

+

image.png

+

Example of how to configure Event Hub as an external source and send notifications to it:

+

We can utilize the flux notification controller to dispatch events to external systems (Azure Function, Azure Event Hub, Slack, Teams) https://fluxcd.io/flux/components/notification/

+

Reference: https://github.com/fluxcd/notification-controller/blob/main/docs/spec/v1beta2/providers.md#sas-based-auth

+

Using SAS based authentication:

+

Create a secret containing the shared access key:

kubectl create secret generic webhook-url --from-literal=address="Endpoint=sb://events-hub-adp-poc.servicebus.windows.net/;SharedAccessKeyName=flux-notifications;SharedAccessKey=an1ZOt9v90oycqy67rbcnEoXaIGecBLAH+AEhD/vy1g=;EntityPath=flux-events"

+

Create a Provider resource for Event Hub:

apiVersion: notification.toolkit.fluxcd.io/v1beta2
kind: Provider
metadata:
  name: azureeventhub
  namespace: flux-config
spec:
  type: azureeventhub
  secretRef:
    name: webhook-url

+

Create an Alert resource for the type of Alerts we want to monitor:

apiVersion: notification.toolkit.fluxcd.io/v1beta2
kind: Alert
metadata:
  name: azureeventhub-alert
  namespace: flux-config
spec:
  providerRef:
    name: azureeventhub
  eventSeverity: info
  eventSources:
    - kind: HelmRelease
      name: '*'
  inclusionList:
    - ".*succeeded.*"

+

Example ADO Pipeline Callback task

+

To make the ADO pipeline wait for Flux deployment completion we can utilize the AzureFunction@1 task. This allows us to call an Azure Function asynchronously. The function can then poll a database (or queue?) and, when a HelmRelease completion entry appears for the service, call back to the pipeline to continue:

+

Example AzureFunction@1 yaml:

+
stages:
  - stage: TestStage
    jobs:
      - job: TestJob
        pool: server
        timeoutInMinutes: 10
        steps:

          - task: AzureFunction@1
            inputs:
              function: 'https://adopipelineasyncfuncpoc.azurewebsites.net/api/AdoCallBack'
              key: '$(callBackKey)'
              method: 'POST'
              body: |
                '{
                "helmReleaseName": "ffc-demo-web-infra-post-deploy",
                "helmReleaseVersion": "4.32.30"
                }'
              waitForCompletion: 'true'
+

Example of Function App that uses the callback completion mode

+

Configuring Flux V2 on AKS

+

Enable Flux Extension on the AKS Cluster

+

GitOps with Flux v2 is enabled as a cluster extension in Azure Kubernetes Service (AKS) clusters.

+

The microsoft.flux cluster extension is installed using a bicep template aks-cluster.bicep and an ADP Infra Pipeline platform-adp-core.

+

Below is a snippet of the code for enabling Flux on AKS

+
fluxExtension: {
  autoUpgradeMinorVersion: true
  releaseTrain: 'Stable'
  configurationSettings: {
    'helm-controller.enabled': 'true'
    'source-controller.enabled': 'true'
    'kustomize-controller.enabled': 'true'
    'notification-controller.enabled': 'true'
    'image-automation-controller.enabled': 'false'
    'image-reflector-controller.enabled': 'false'
  }
  configurations: [ ...

  ]

Add Flux Customizations

+

After the microsoft.flux cluster extension has been installed, create a fluxConfiguration resource that syncs the Git repository source adp-flux-core to the cluster and reconciles the cluster to the desired state. With GitOps, the Git repository is the source of truth for cluster configuration and application deployment.

+

The Flux configuration links Flux to the ADP Flux Git repository and defines:

+
  • The git repository that Flux should use
  • The branch you want to use e.g. main
  • The root Kustomization objects to run, which will then be used to deploy the rest of the workloads (core services and business applications).

Refer to the documentation for the Flux repositories structure for details of the two Flux repositories (adp-flux-core and adp-flux-services) and folder structures.

+

The Flux Configuration has three Kustomizations

| Kustomization | Path                        | Purpose |
|---------------|-----------------------------|---------|
| cluster       | ./clusters/<environment>/01 | Cluster level configurations e.g. Flux Controllers, CRDs |
| infra         | ./infra/<environment>/01    | Core Services e.g. Nginx Plus. Depends on the cluster Kustomization |
| services      | ./services/<environment>/01 | Business applications. Depends on the infra Kustomization |
+

The Kustomizations have been configured with dependencies to ensure the Flux deployments are done in the correct sequence, starting with the Cluster Kustomization, followed by Infra and lastly Services.

+
+

Below is a snippet of the Kustomizations configuration from aks-cluster.bicep and aks-cluster.parameters.json

+
kustomizations: {
   cluster: {
     path: fluxConfig.clusterCore.kustomizations.clusterPath
     dependsOn: []
     timeoutInSeconds: fluxConfig.clusterCore.kustomizations.timeoutInSeconds
     syncIntervalInSeconds: fluxConfig.clusterCore.kustomizations.syncIntervalInSeconds
     validation: 'none'
     prune: true
   }
   infra: {
     path: fluxConfig.clusterCore.kustomizations.infraPath
     timeoutInSeconds: fluxConfig.clusterCore.kustomizations.timeoutInSeconds
     syncIntervalInSeconds: fluxConfig.clusterCore.kustomizations.syncIntervalInSeconds
     dependsOn: [
       'cluster'
     ]
     validation: 'none'
     prune: true
   }
   services: {
     path: fluxConfig.services.kustomizations.servicesPath
     timeoutInSeconds: fluxConfig.services.kustomizations.timeoutInSeconds
     syncIntervalInSeconds: fluxConfig.services.kustomizations.syncIntervalInSeconds
     retryIntervalInSeconds: fluxConfig.services.kustomizations.retryIntervalInSeconds
     dependsOn: [
       'infra'
     ]
     prune: true
   }
}
+

Although we have two Flux Git repositories, we are using a single Flux configuration because we cannot set dependencies at the Flux configuration level. Instead, we have a single Flux configuration with three Kustomizations that will be deployed in sequential order: Cluster > Infra > Services.

+

The Services Flux configuration contains a GitRepository and a Kustomization file that points to the Services Flux git repository adp-flux-services using the path ./services/environments/<environment>/01


GitOps for AKS

+

GitOps Principles

+

GitOps is a set of principles for operating and managing a software system. Below are the principles of GitOps

+

According to GitOps principles, the desired state of a GitOps-managed system must be:

+
  1. Declarative: A system that GitOps manages must have its desired state expressed declaratively. The declaration is typically stored in a Git repository.
  2. Versioned and immutable: The desired state is stored in a way that enforces immutability and versioning, and retains a complete version history.
  3. Pulled automatically: Software agents automatically pull the desired state declarations from the source.
  4. Continuously reconciled: Software agents continuously observe actual system state and attempt to apply the desired state.

Flux v2

+

You can use either Flux or Argo CD as GitOps operators for AKS. Both are Cloud Native Computing Foundation (CNCF) projects that are widely used. Refer to the Microsoft documentation GitOps for Azure Kubernetes Service.

+

Flux V2 is the GitOps operator that will be implemented for the ADP Platform.

+

Draft - Further discussion required on step 4

+

image.png

+

The CI pipeline uses either ADO YAML Pipelines or Jenkins for build. For deployment, it uses Flux as the GitOps operator to pull and sync the app. The data flows through the scenario as follows:

+
  1. The app code is developed by using an IDE such as Visual Studio Code.
  2. A Pull request is raised to merge the changes into the main branch:
    • CI pipeline will deploy the changes to DEV and run automation tests as part of the PR review process.
    • PR approvers review changes.
    • The app code is committed to a GitHub repository if the CI build is successful, the automation tests have passed and the PR has been approved.
+

Updating the versions of the application in the Helm chart can be done automatically by the CI or manually by the developer before raising the PR.

+
+
  3. The CI pipeline builds a container image from the app code and pushes the container image to the DEV environment Azure Container Registry.
  4. The Flux operator detects configuration drift in the Git repository and pulls the configuration changes. An image policy is used by Flux to scan the ACR repository for new images.
  5. Flux uses manifest files to deploy the app to the AKS cluster.

Promoting Changes across Environments

+

Refer to the Wiki page Application Deployments

+

Core/Shared Services

+

Flux V2 will be used to bootstrap the baseline configuration of each cluster. The baseline configuration will comprise the core services, e.g. Nginx Plus.


Repository Setup

+

The section below describes how the repositories have been set up to handle multiple environments and teams with Flux V2.

+

There are different ways of structuring repositories as described in the Flux documentation.

+

Key considerations

+
  • Teams will deliver new features and bug fixes using trunk-based development
  • A new application repository will be created for each service/application
  • The repository structure should support multi-tenancy

Repositories

+

Two repositories were created to ensure the separation of core infrastructure and application deployments and manifests.

| Repository        | Purpose |
|-------------------|---------|
| adp-flux-core     | This will be the main repository used for configuring GitOps. It will contain the manifests to deploy the core services to the AKS Cluster e.g. for secrets management |
| adp-flux-services | This is a secondary repository that will be referenced by GitRepositories from the main repository adp-flux-core |

adp-flux-core structure

+

The adp-flux-core contains the following top directories

+
  • clusters directory contains the Flux configuration per cluster.
  • infra directory contains the environment subfolders, each containing base and overlays folders. Overlays are used to minimise duplication. Kustomizations are used to deploy the core services, such as Nginx Plus. The base folder for each environment will contain a list of the core services defined in the core folder that should be deployed for a specific environment. Folders 01/02 represent cluster instances within an environment. These overlay folders 01/02 contain environment-specific Kustomize patches that contain the environment-specific settings.
  • core folder contains the Flux configuration (HelmRepository and HelmRelease) CRD manifests used for installing the core services.
  • services folder contains a GitRepository and Kustomization per environment that points to a path in the adp-flux-services repository. For example, the Kustomization for snd/01 will point to the path services/environments/snd/01 in the adp-flux-services repository (an illustrative sketch of these manifests appears after the folder structure below).
.
├── clusters                                         # Flux configuration for the Flux System (controllers etc)/
│   ├── snd                                          # snd environment specific cluster configuration/
│   │   ├── base
│   │   └── 01/02                                    # base flux configuration gotk-components.yaml, gotk-sync.yaml
│   ├── dev/
│   │   ├── base
│   │   └── 01/02
│   ├── pre/
│   │   ├── base
│   │   └── 01/02
│   └── prod/
│       ├── base
│       └── 01/02
├── infra                                            # Flux configuration for the core services e.g. Nginx Plus/
│   ├── snd                                          # Contains infra configuration for SND core services /
│   │   ├── base                                     # References 1 or more core services to be deployed
│   │   └── 01/02                                    # Overlay containing the patches, references base
│   ├── dev/
│   │   ├── base
│   │   └── 01/02
│   ├── pre/
│   │   ├── base
│   │   └── 01/02
│   └── prod/
│       ├── base
│       └── 01/02
├── core                                             # Contains the manifests for the core service (HelmRelease, HelmRepository)/
│   ├── nginxplus/
│   │   ├── nginx-ingress-chart.yaml
│   │   └── nginx-ingress-release.yaml
│   └── certmanager
└── services                                         # Flux configuration for business applications/
    ├── gitrepository.yaml                           # GitRepository resource for adp-flux-services
    ├── snd                                          # environment/
    │   └── 01                                       # environment instance number/
    │       ├── Kustomization.yaml                   # References GitRepository, services.yaml
    │       └── services.yaml                        # This is a Kustomization file that references gitrepository.yaml and specifies the path
    ├── dev/
    │   └── 01/
    │       ├── Kustomization.yaml
    │       └── services.yaml
    ├── pre/
    │   └── 01/
    │       ├── Kustomization.yaml
    │       └── services.yaml
    └── prod/
        └── 01/
            ├── Kustomization.yaml
            └── services.yaml

You can use the markdown generator tool to update the above folder structure
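For illustration only, a sketch of what the gitrepository.yaml and a per-environment services.yaml Kustomization might contain - the API versions, intervals and repository URL are assumptions, not the actual ADP manifests:

apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
  name: adp-flux-services
  namespace: flux-system
spec:
  interval: 1m
  url: https://github.com/DEFRA/adp-flux-services    # assumed repository URL
  ref:
    branch: main
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: services-snd-01            # hypothetical name for the snd/01 instance
  namespace: flux-system
spec:
  interval: 10m
  sourceRef:
    kind: GitRepository
    name: adp-flux-services
  path: ./services/environments/snd/01               # path in adp-flux-services, as described above
  prune: true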

+

adp-flux-services structure

+

The adp-flux-services repository contains the following top directories

+
  • services directory contains the Flux configurations used for deploying the business applications.

Tenants

+
  • Tenants refers to the application teams that are responsible for the development of one or more business applications.
  • A unique namespace will be created for each tenant.
  • All applications owned by a specific tenant on the ADP platform will be deployed to the same tenant namespace.

Below is a description of the subfolders inside the services folder.

| Subfolder    | Purpose |
|--------------|---------|
| base         | The base folder contains manifests that are common to each tenant e.g. namespace, ResourceQuota and ServiceAccount. These manifests are generic, in that they have variables that can be specified at the time of onboarding. |
| environments | This contains the environment subfolders, each containing base and overlays folders. Overlays are used to minimise duplication. Kustomizations are used to deploy the business services |
| tenants      | These are the application teams |
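For illustration, a sketch of what the generic namespace manifest in the base folder might look like, assuming Flux post-build variable substitution is used to fill in the tenant-specific values - the variable and label names are hypothetical:

apiVersion: v1
kind: Namespace
metadata:
  name: ${tenant_name}                         # hypothetical variable substituted per tenant at onboarding
  labels:
    toolkit.fluxcd.io/tenant: ${tenant_name}   # hypothetical tenant label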
.
└── services/
    ├── base/
    │   └── namespace.yaml
    ├── environments/
    │   ├── snd/
    │   │   └── 01/
    │   │       └── kustomization.yaml                # references business services
    │   ├── dev/
    │   │   └── 01/02
    │   ├── pre/
    │   │   └── 01/02
    │   └── prod/
    │       └── 01/02
    └── tenant-a                                      # Team level configuration e.g. namespace, notification/
        ├── kustomization.yaml                        # References common manifests defined in apps/base e.g. namespace.yaml
        ├── tenant-a-service1/
        │   ├── helmrelease.yaml                      # tenant a owns 1 or more services
        │   ├── helmrepository.yaml
        │   ├── snd.yaml                              # environment specific patch for snd environment
        │   ├── dev.yaml                              # environment specific patch for dev environment
        │   ├── pre.yaml                              # environment specific patch for pre environment
        │   └── prod.yaml                             # environment specific patch for prod environment
        └── tenant-b-service1/
            ├── helmrelease.yaml
            ├── helmrepository.yaml
            ├── dev.yaml
            ├── snd.yaml
            ├── pre.yaml
            └── prod.yaml

You can use the markdown generator tool to update the above folder structure
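As an illustration of the base manifests described above, below is a minimal sketch of what base/namespace.yaml could contain. It is indicative only: the ${tenant_name} placeholder assumes Flux post-build variable substitution is used to supply tenant-specific values at onboarding.

apiVersion: v1
kind: Namespace
metadata:
  name: ${tenant_name}          # substituted per tenant at onboarding
  labels:
    toolkit.fluxcd.io/tenant: ${tenant_name}
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: ${tenant_name}-sa       # generic ServiceAccount common to every tenant
  namespace: ${tenant_name}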

diff --git a/Platform-Architecture/architectural-components/ci-cd-and-automation/infrastructure-pipelines/index.html b/Platform-Architecture/architectural-components/ci-cd-and-automation/infrastructure-pipelines/index.html
new file mode 100644

Infrastructure Pipelines - DEFRA - ADP Documentation

Infrastructure Pipeline Enhancements

+

The Platform core pipeline will be able to deploy all components (resources) that are within the defined environment. The pipeline will be able to deploy resources based on the resource category and/or resource type. This article describes the structure for the deployment of these resources based on ADO Pipelines and associated YAML files.

+

Key Principles

+

Prior to this enhancement, when deploying or testing a resource, developers would comment out parts of the code within the deploy-platform-core.yaml file in order to deploy only the relevant templates. The alternative approach was to deploy all of the resources, which takes more time and slows down the process. This latter approach may also come at an extra cost.

+

Azure Resource Deployment

+

With the enhancement, developers can deploy resources based on the categories below:
  • All
  • Network - All
  • Network - VNET
  • Network - NSGs
  • Network - Route Tables
  • Monitoring - All
  • Monitoring - Workspace
  • Monitoring - Insights
  • Monitoring - Azure Monitor Workspace
  • Monitoring - Grafana
  • Managed Cluster - All
  • Managed Cluster - Private DNS Zone
  • Managed Cluster - Cluster
  • Front Door - All
  • Front Door - Core
  • Front Door - WAF
  • Application - All
  • Application - App Configuration
  • Application - Apps Container Registry
  • Application - PostgreSql Server
  • Application - Redis Cache
  • Application - Storage Account
  • Application - Service Bus
  • Application - Key Vault

+

Deployment process
On the ADO pipeline, the above categories of resources can be selected from a dropdown menu. All resources can be deployed when the option 'All' is selected. This option will deploy all components of Network, Monitoring, Managed Cluster, Front Door and Application.
However, if a developer or tester needs to deploy only network resources, the selection will be 'Network - All'. With this option, all of the network resources (VNET, NSGs and Route Tables) will be deployed.

+

Similarly, if the deployment is for testing a specific resource within the network category, for example VNET, they can do so by selecting the 'Network - VNET' option from the dropdown list. This will only deploy the VNET template as defined in the YAML file.

+

Implementation approach
The categorisation of resources has been applied using 'parameters' defined within the deploy-platform-core.yaml file. The default value is 'All'.

+

Furthermore, 'variables' have been used to define each of the values listed in the 'parameters' section.

+

Conditional statements are subsequently applied to filter the resources in the 'groupedTemplates'.
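As an indicative sketch (not the actual contents of deploy-platform-core.yaml), the pattern looks roughly like this; the parameter name, the reduced value list and the template paths below are assumptions for illustration only:

# Hypothetical sketch of category-based resource deployment in an ADO pipeline
parameters:
  - name: deployResources
    displayName: Resources to deploy
    type: string
    default: All                       # 'All' deploys every component
    values:
      - All
      - Network - All
      - Network - VNET

stages:
  - stage: DeployPlatformCore
    jobs:
      - job: Deploy
        steps:
          # The VNET template is only included when 'All', 'Network - All'
          # or 'Network - VNET' is selected from the dropdown.
          - ${{ if or(eq(parameters.deployResources, 'All'), eq(parameters.deployResources, 'Network - All'), eq(parameters.deployResources, 'Network - VNET')) }}:
              - template: templates/network/vnet.yaml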

diff --git a/Platform-Architecture/architectural-components/ci-cd-and-automation/naming-conventions-and-structures/index.html b/Platform-Architecture/architectural-components/ci-cd-and-automation/naming-conventions-and-structures/index.html
new file mode 100644

Naming Conventions & Structures - DEFRA - ADP Documentation

Pipeline Naming Conventions & Structures

+

All pipelines on the Platform will have a specific set of naming conventions to ensure consistency, standardization and readability. This article describes the naming conventions of ADO Pipelines and associated YAML files.

+

The conventions

+

platform-<project>-<purpose>

+

For the ADO Pipelines: 'Platform' highlights the fact that it is a Platform pipeline. 'Project' is always ADP for the Platform. 'Purpose' is typically either: core, bootstrap, product or service. For the YAML files, we try to maintain the same naming convention, except that all files should be prefixed with 'deploy' or 'ci'.

+

Azure DevOps Pipelines

+

Based on the types of pipelines and their purposes, the following naming conventions have been identified for ADO (UI Display Name):

+

Core Infrastructure

+
    +
  • platform-adp-core
  • +
  • platform-adp-bootstrap-serviceconnections
  • +
+

Products

+
    +
  • platform-adp-products
  • +
+

Services

+
    +
  • platform-adp-services
  • +
+

The ADO Pipelines will all be created in the existing DEFRA-FFC ADO project under the ADP Subfolder.

+

Pipeline Matrix

Folder    | Description | Pipelines | YAML Pipelines
Bootstrap | Contains pipelines used for bootstrapping, e.g. setting up service connections | platform-adp-bootstrap-serviceconnections | deploy-platform-bootstrap-serviceconnections
Core      | Contains pipelines used to install the ADP Platform Core resources, e.g. AKS. These are all shared Platform components used by platform tenants. | platform-adp-core | deploy-platform-core
Product   | Contains pipelines used for onboarding & deploying services onto the ADP Platform (i.e. their infrastructure components) | platform-adp-products (not implemented yet) | deploy-platform-products
Services  | Contains Service pipelines for deploying into the AKS Cluster with FluxCD (GitOps Pipelines) | platform-adp-services (not implemented yet) | deploy-platform-services
+

Folder Structure - ADP Infrastructure

+

Core infrastructure will reside within GitHub in ADP Infrastructure. Other infrastructure and FluxCD repos are planned, for which the design is currently in progress. It is proposed that the infrastructure dedicated to our tenant services (products) will reside within ADP-Infrastructure-Services*.

+

All infrastructure will be within the 'infra' folder. 'Core' designates the Platform Core Shared Infrastructure that is used by all the platform projects & services (tenants). 'Services' designates that the folder contains only infrastructure and configuration dedicated to that project/service (tenant).

+

Each Module instantiation file will be within its own folder, broken down with the following conventions (as per the Bicep Module registry convention):

+
  • The following folder structure for Modules: Infra/<module-name>/<module-name>.<extension>
  • The file extension will either be .bicep or .bicepparam for infrastructure modules.
  • For PowerShell scripts and PowerShell modules the extension will be .ps1 or .psm1.
  • .yaml for YAML files (i.e. ADO Pipelines).
+
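Following these conventions, a hypothetical module layout (the module names below are illustrative only, not the actual repository contents) would look like:

infra
├── core
│   └── managed-cluster
│       ├── managed-cluster.bicep
│       └── managed-cluster.bicepparam
└── services
    └── app-config
        ├── app-config.bicep
        └── app-config.bicepparam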


+

Infrastructure repository diagram for ADP-Infra-Core and ADP-Infra-Services

+

infra-repo.png

diff --git a/Platform-Architecture/architectural-components/istio-service-mesh-poc/index.html b/Platform-Architecture/architectural-components/istio-service-mesh-poc/index.html
new file mode 100644

Istio Architecture POC - DEFRA - ADP Documentation

Istio Service Mesh POC

+ +

Istio Architecture

+
+

TODO

+

This page is a work in progress and will be updated in due course.

+
+

This document details the findings of the Istio Service Mesh POC, including some of its features and its integration with Flux.

+

An Istio service mesh is logically split into a data plane and a control plane.

+

The data plane is composed of a set of intelligent proxies (Envoy) deployed as sidecars. These proxies mediate and control all network communication between microservices. They also collect and report telemetry on all mesh traffic.

+

The control plane manages and configures the proxies to route traffic.

+

The following diagram shows the different components that make up each plane:

+

image.png

+

Reference: Istio Architecture

+

Installation

+

Because we are using Nginx as our ingress controller, the following document was referenced when implementing the installation in adp-flux-core and adp-flux-services:
NGINX Ingress Controller and Istio Service Mesh

+

image.png

+

Features

+
    +
  • +

    MTLS

    +

    Istio automatically configures workload sidecars to use mutual TLS when calling other workloads. By default, Istio configures the destination workloads using PERMISSIVE mode. When PERMISSIVE mode is enabled, a service can accept both plaintext and mutual TLS traffic. In order to only allow mutual TLS traffic, the configuration needs to be changed to STRICT mode.

    +
  • +
+

Reference: MTLS

+

image.png

+

image.png

+

image.png

+

Here is an example of applying STRICT mTLS at the namespace level:

+
apiVersion: security.istio.io/v1beta1
+kind: PeerAuthentication
+metadata:
+  name: default
+  namespace: ffc-demo
+spec:
+  mtls:
+    mode: STRICT
+
+
    +
  • +

    Circuit Breaking

    +

    Circuit breaking is an important pattern for creating resilient microservice applications. Circuit breaking allows you to write applications that limit the impact of failures, latency spikes, and other undesirable effects of network peculiarities.

    +

    Reference: Circuit breaking

    +

    Istio uses DestinationRule to configure circuit breakers.

    +

    Here is a sample DestinationRule with circuit breaker rules:

    +
  • +
+
apiVersion: networking.istio.io/v1alpha3
+kind: DestinationRule
+metadata:
+  name: httpbin
+spec:
+  host: httpbin
+  trafficPolicy:
+    connectionPool:
+      tcp:
+        maxConnections: 1
+      http:
+        http1MaxPendingRequests: 1 #  limits the number of requests that can be queued
+        maxRequestsPerConnection: 1 # Previous request has to complete before next one is sent
+    outlierDetection:
+      consecutive5xxErrors: 2
+      interval: 1s
+      baseEjectionTime: 30s
+      maxEjectionPercent: 100
+
+
    +
  • +

    Fault Injection

    +

    While Envoy sidecar/proxy provides a host of failure recovery mechanisms to services running on Istio, it is still imperative to test the end-to-end failure recovery capability of the application as a whole. Misconfigured failure recovery policies (e.g., incompatible/restrictive timeouts across service calls) could result in continued unavailability of critical services in the application, resulting in poor user experience.

    +

    Istio enables protocol-specific fault injection into the network, instead of killing pods, delaying or corrupting packets at TCP layer. Our rationale is that the failures observed by the application layer are the same regardless of network level failures, and that more meaningful failures can be injected at the application layer (e.g., HTTP error codes) to exercise the resilience of an application.

    +

    Operators can configure faults to be injected into requests that match specific criteria. Operators can further restrict the percentage of requests that should be subjected to faults. Two types of faults can be injected: delays and aborts. Delays are timing failures, mimicking increased network latency, or an overloaded upstream service. Aborts are crash failures that mimic failures in upstream services. Aborts usually manifest in the form of HTTP error codes, or TCP connection failures.

    +
  • +
+

Reference: Fault Injection, https://imesh.ai/blog/traffic-management-and-network-resiliency-with-istio-service-mesh/
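To make this concrete, here is a minimal VirtualService sketch that injects both fault types, loosely based on the Istio documentation; the ratings host/service names are hypothetical:

apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: ratings
spec:
  hosts:
    - ratings
  http:
    - fault:
        delay:
          percentage:
            value: 10.0          # delay 10% of requests
          fixedDelay: 5s         # mimic increased latency / an overloaded upstream
        abort:
          percentage:
            value: 5.0           # abort 5% of requests
          httpStatus: 500        # mimic a crash failure in the upstream service
      route:
        - destination:
            host: ratings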

+ +

Multi-Cluster setup

+

It is possible to have a multi-cluster setup for Istio: https://istio.io/latest/docs/setup/install/multicluster/.

+

Our setup is slightly different from the instructions because we are using the Nginx Ingress Controller, so we will have to investigate how to get a multi-cluster setup working with Nginx and Istio.

+

Istio Supported Releases

+

Additional Notes:

+
  • To enable end-to-end TLS, we will need Istio in place. We will need to investigate how to configure end-to-end TLS with Azure Front Door > Nginx > Istio.
  • There is also a Kiali plugin available for Backstage, which we should look into and implement if it proves useful.
diff --git a/Platform-Architecture/architectural-components/microservices-and-aks/index.html b/Platform-Architecture/architectural-components/microservices-and-aks/index.html
new file mode 100644

Microservices & AKS - DEFRA - ADP Documentation

Microservices & AKS

+

This document details the example microservice architecture that is being developed on the Platform. It details the logical and physical separation between Clusters, Node Pools, Nodes, Namespaces and Pods, and how they map to projects and programmes.

+

Requirements

+
  • Logical and physical separation between environments, projects & programmes
  • Allow for individual scalability and ensure projects and programmes cannot starve each other of resources (resource limits and quotas)
+

Decisions & Outcomes

+
  • The System and User Nodepools will be separated.
  • The System Nodepool will only contain system resources (Flux, NGINX, Workload ID etc.)
  • Programmes will have separate Nodepools, one per programme (BBaT, FCP, etc.) or per specific resource requirements and demands
  • There will be an individual Namespace per project in a given Nodepool (e.g. one each for Project A, Project B, etc.)
  • Namespaces will have resource and quota limits defined (see the sketch after this list)
  • NGINX Plus is the Ingress Controller (load balancing internal traffic) with a Private IP
  • Azure Front Door is the edge WAF and CDN, with a Public IP
  • Node (VM) sizes to be defined / use the existing FCP convention
  • The ILB defined below allows a Private Link Service from the AFD to connect into AKS
+
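A minimal sketch of such namespace limits is shown below; the namespace name and the quota values are hypothetical and would be tuned per project:

apiVersion: v1
kind: ResourceQuota
metadata:
  name: project-a-quota
  namespace: project-a           # hypothetical project namespace
spec:
  hard:
    requests.cpu: "4"            # total CPU the project's pods may request
    requests.memory: 8Gi
    limits.cpu: "8"              # hard ceiling across the whole namespace
    limits.memory: 16Gi
    pods: "40"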

Examples

+

The diagram below generally illustrates these requirements and separation. The namespaces provide the logical separation, and the separate Clusters provide physical separation. An example service is illustrated, with the types of resources that can be deployed and how they integrate with the Hub/Spoke networking and egress through the outbound firewall (the CCoE-managed Palo Altos).

+

aks-microservices-advanced-production-deployment.png

diff --git a/Platform-Architecture/architectural-components/monitoring/alerts/index.html b/Platform-Architecture/architectural-components/monitoring/alerts/index.html
new file mode 100644

Alerts - DEFRA - ADP Documentation

Alerts in ADP

+

Azure Monitor for containers now includes recommended alerts. These preconfigured metric alerts enable monitoring of system resources when they are running at peak capacity or hitting failure rates.

+

image.png

+

Metric Alert Rules

+

Metric alerts in Azure Monitor proactively identify issues related to system resources of your Azure resources, including monitored Kubernetes clusters.

+
+

Container insights provides preconfigured alert rules, so we will use those as a starting point...

+

Container insights in Azure Monitor now supports alerts based on Prometheus metrics, and metric rules will be retired on March 14, 2026. If you already use alerts based on custom metrics, you should migrate to Prometheus alerts and disable the equivalent custom metric alerts. As of August 15, 2023, you will no longer be able to configure new custom metric recommended alerts using the portal.

+
+

Metric alert rules in Container insights (preview)

+

Prometheus rules

+

Prometheus alert rules use metric data from your Kubernetes cluster sent to Azure Monitor managed service for Prometheus.

+
+

Enable Prometheus Alert Rules by deploying the community and recommended alerts using the Bicep template. Follow the README.md file in the same folder for how to deploy. +https://github.com/Azure/prometheus-collector/blob/main/AddonBicepTemplate/AzureMonitorAlertsProfile.bicep

+
+

Configure alertable metrics in ConfigMaps

+

The tutorial below specifies how you can configure the alertable metrics in ConfigMaps.

+

Additional Documentation

+ +

Flux Alerts

+

Flux Alerts are configured to notify teams about the status of their GitOps pipelines.

+

The Flux controllers emit Kubernetes events whenever a resource status changes. You can use the notification-controller to forward these events to Slack, Microsoft Teams, Discord and others. The notification controller is part of the default Flux installation.

+

The following alerts will be configured for the following scenarios:

+
  • Reconciliation failures in the cluster
  • A new version of an app was deployed, and whether the deployment is healthy
+

Slack Integration

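The detail of the Slack integration is still to be documented here. As an indicative sketch, a Flux notification Provider and Alert along the following lines would cover the scenarios above; the channel, secret name and alert name below are assumptions for illustration:

apiVersion: notification.toolkit.fluxcd.io/v1beta2
kind: Provider
metadata:
  name: slack
  namespace: flux-system
spec:
  type: slack
  channel: adp-alerts              # hypothetical Slack channel
  secretRef:
    name: slack-webhook-url        # secret containing the webhook address
---
apiVersion: notification.toolkit.fluxcd.io/v1beta2
kind: Alert
metadata:
  name: adp-alerts
  namespace: flux-system
spec:
  providerRef:
    name: slack
  eventSeverity: info              # 'info' also reports successful deployments
  eventSources:
    - kind: Kustomization
      name: '*'
    - kind: HelmRelease
      name: '*'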
diff --git a/Platform-Architecture/architectural-components/monitoring/automated-monitoring-implementation/index.html b/Platform-Architecture/architectural-components/monitoring/automated-monitoring-implementation/index.html
new file mode 100644

Automated monitoring implementation - DEFRA - ADP Documentation

Automated Monitoring Implementation - Enable Prometheus Logs

+

This section details how the AKS Prometheus logs were enabled via Automation. The following documents were referenced:

+

https://learn.microsoft.com/en-us/azure/azure-monitor/containers/prometheus-metrics-enable?tabs=bicep

+

https://github.com/slavizh/BicepTemplates/blob/main/monitor-prometheus/aks-resources.bicep

+

These are the steps that were carried out:

+
  1. The 'Monitoring Data Reader' role was given to the Grafana system assigned identity on the Azure Monitor Workspace, so Grafana can query metrics. Bicep Template
  2. A Data Collection Rule Association was created between the AKS Cluster and the Azure Monitor Workspace. Bicep Template
  3. The default metrics prometheusRuleGroups provided by Microsoft were added to the automation in order to populate the Dashboards in Grafana. Bicep Template
  4. The azureMonitorProfile metrics were enabled in the AKS Bicep Module. Bicep Template
+

Prometheus Log Retention
Managed Prometheus includes 18 months of data retention. This is included as part of the service and there is no additional charge for storage and retention.

+

https://azure.microsoft.com/en-gb/updates/general-availability-azure-monitor-managed-service-for-prometheus/ (Opens in new window or tab)

+

https://techcommunity.microsoft.com/t5/azure-observability-blog/introducing-azure-monitor-managed-service-for-prometheus/ba-p/3600185 (Opens in new window or tab)

+

Managed Prometheus Dashboard example: +image.png

+

Automate creation of Flux Dashboards

+

This section details how the Flux Dashboard creation and population was automated. The following document was referenced:

+

https://learn.microsoft.com/en-us/azure/azure-arc/kubernetes/monitor-gitops-flux-2

+

These are the steps that were carried out:

+
  1. The 'Grafana Admin' permission was granted to the ADO SSV3 (ADO-DefraGovUK-AAD-ADP-SSV3) service principal on the Azure Managed Grafana instance. This is required to allow the pipeline to create the Dashboards in Grafana.
  2. A PowerShell script was created to check whether the 'Flux' folder and the new dashboards exist; if they don't, the script will create them. PowerShell Script
     The Dashboard JSON templates were taken from: GitOps Flux - Application Deployments Dashboard, Flux Control Plane and Flux Cluster Stats.
  3. The 'Reader' permission was granted to the Grafana system assigned identity on the environment subscription, e.g. AZD-ADP-SND1.
  4. Configure the Azure Monitor Agent to scrape the Azure Managed Flux metrics by creating a configmap (see the sketch below). This change was made in the adp-flux-core repository.
+
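As an indicative sketch of step 4, the Azure Monitor Agent picks up custom scrape configuration from a ConfigMap named ama-metrics-prometheus-config in the kube-system namespace. The job name and label selector below are assumptions based on the referenced Microsoft guidance; see adp-flux-core for the actual configuration:

apiVersion: v1
kind: ConfigMap
metadata:
  name: ama-metrics-prometheus-config   # name expected by the Azure Monitor Agent
  namespace: kube-system
data:
  prometheus-config: |
    scrape_configs:
      - job_name: flux-system            # scrape the Flux controllers' metrics
        kubernetes_sd_configs:
          - role: pod
            namespaces:
              names: [flux-system]
        relabel_configs:
          - source_labels: [__meta_kubernetes_pod_label_app]
            regex: .*-controller         # keep only the Flux controller pods
            action: keep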

SND1 Grafana Instance

+

Flux Dashboard Example: +image.png

diff --git a/Platform-Architecture/architectural-components/monitoring/network-watcher/index.html b/Platform-Architecture/architectural-components/monitoring/network-watcher/index.html
new file mode 100644

Network Watcher - DEFRA - ADP Documentation

Network Watcher

+

Azure Network Watcher provides a suite of tools to monitor, diagnose, view metrics, and enable or disable logs for the ADP Platform, specifically, the AKS Clusters.

+
+

Network Watcher is enabled automatically in a virtual network's region when we create or update the virtual network in a subscription.

+
+

The Network Watcher resource in each ADP Platform subscription and region is created in the NetworkWatcherRG resource group.

+

Below is a screenshot for Network Watcher instances in the Sandpit environments.

+

image.png

+

NSG Flow Logs

+

Flow Logs are enabled for the NSGs associated with the AKS Cluster subnets. Flow Logs are vital to monitor, manage, and understand the ADP Platform virtual networks (one per environment) so that they can be protected and optimised. They enable tracking and monitoring of the following:
  • The current state of the network
  • Who is connecting, and where users are connecting from
  • Which ports are open to the internet
  • What network behaviour is expected, what network behaviour is irregular, and when sudden rises in traffic happen

+
+

Network Security Group (NSG) Flow Logs Retention Period is set to 30 days.

+

Retention is available only if you use general-purpose v2 storage accounts. An ADP Platform storage account has been created in each ADP subscription for the flow logs.

+
+

VNET Flow Logs

+

The Network Watcher VNet flow logs capability overcomes some of the existing limitations of NSG flow logs, e.g. VNet flow logs avoid the need to enable multi-level flow logging, such as where network security groups are configured at both the subnet & NIC level.

+
+

VNet flow logs are currently in PREVIEW, so they will not be implemented until they are GA. The preview version is not available in the UK South and UK West regions.

+
diff --git a/Platform-Architecture/architectural-components/monitoring/overview/index.html b/Platform-Architecture/architectural-components/monitoring/overview/index.html
new file mode 100644

ADP Monitoring Overview - DEFRA - ADP Documentation

Monitoring

+

Azure Monitor will be used to monitor the health and performance of the Kubernetes clusters and the workloads running on them.

+

The AKS Cluster generates metrics (Platform and Prometheus Metrics) and logs (Activity and Resource Logs), refer to Monitoring AKS data reference for detailed information. Custom metrics will be enabled automatically since the AKS cluster uses managed identity authentication.

+

+

Source: https://learn.microsoft.com/en-us/azure/aks/monitor-aks

+

The diagram below shows the different levels of monitoring.

+

Diagram of layers of Kubernetes environment with related administrative roles.

+

Source: https://learn.microsoft.com/en-us/azure/azure-monitor/containers/monitor-kubernetes

+

Container Insights

+

Azure Monitor now offers a unified cloud native offering for Kubernetes monitoring:
  • Azure Monitor managed service for Prometheus
  • Azure Monitor Container Insights
  • Azure Managed Grafana

+

Container Insights stores its data in a Log Analytics workspace. Therefore an ADP Platform Log Analytics Workspace has been created to store the AKS metrics and logs.

+

Enable Container insights for Azure Kubernetes Service (AKS) cluster

+

After enabling Container Insights, you will be able to view the AKS Cluster in the list of monitored clusters.

+

image.png

+

There are many reports available for Node Monitoring, Resource Monitoring, Billing, Networking and Security.

+

image.png

+

The diagram below shows the insights on the Nodes. The other tabs when clicked would show insights for the Cluster, Controllers or Containers.

+

image.png

+

Azure Managed Grafana

+

There are many benefits to using the managed services, such as automatic authentication and authorisation setup based on Azure AD identities and pre-built roles (Grafana Admin, Grafana Editor and Grafana Viewer). The managed Grafana service also comes with the capability to integrate with various Azure data sources through an Azure managed identity and RBAC permissions on your subscriptions. It also comes with default Grafana Dashboards as a base.

+

The managed Grafana service has been installed as a shared resource in the SSV3 and SSV5 subscriptions, which are in the O365_DefraDEV and DEFRA tenants respectively. SSV3 is used for the sandpit environments, whilst SSV5 will be used for all environments in the DEFRA tenant. These are DEV, DEMO, PRE and PROD.

+

image.png

+

Implementation of the Managed Grafana Instance

+
  1. Create an Azure Managed Grafana instance as a Shared resource (in SSV3 or SSV5)
     a. Create and configure an environment specific Azure Monitor workspace
     b. Link the Azure Monitor workspace to the Grafana instance
  2. Enable Prometheus metrics collection by adding the AKS cluster to the monitored clusters
     a. Define alerts
  3. Create Prometheus rule groups: Bicep template
+

Azure Monitoring Managed Service for Prometheus

+

Azure Monitor managed service for Prometheus is a fully managed Prometheus-compatible service that supports industry standard features such as PromQL, Grafana dashboards, and Prometheus alerts.

+

thumbnail image 1 captioned Azure Monitor managed service for Prometheus overview diagram

+

Azure Monitor managed service for Prometheus overview diagram

+

This service requires configuring the metrics addon for the Azure Monitor agent, which collects the metrics and sends them to the Azure Monitor managed service for Prometheus.
Azure Monitor managed service for Prometheus became generally available on 23 May 2023.
General Availability: Azure Monitor managed service for Prometheus

+

Image

diff --git a/Platform-Architecture/architectural-components/secrets-and-configuration/index.html b/Platform-Architecture/architectural-components/secrets-and-configuration/index.html
new file mode 100644

Secrets & Configuration - DEFRA - ADP Documentation

Secrets & Configuration

+

Solution Overview

+

FCP ADP Architecture_2023-10-11_14-23-20.png

+

Azure Architecture (green), per ADP environment:

+
  1. A single key vault will be used by all business services to store their workload services' secrets. Each secret within this key vault will be RBAC controlled to the selected workload identity of the workload service.
  2. A single App Configuration will store the configuration and links to key vault secrets for all business services and their workload services.
  3. Within the helm charts of each workload service, the App Configuration provider must filter via the label detailing the workload service's name to get the application's new configuration. This will be applied to AKS via Flux.
+
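As an indicative sketch of that label-based filtering, the Azure App Configuration Kubernetes Provider supports a selector along the following lines; the endpoint, ConfigMap name and label value here are hypothetical:

apiVersion: azconfig.io/v1
kind: AzureAppConfigurationProvider
metadata:
  name: demo-service-config
spec:
  endpoint: https://example-appconfig.azconfig.io   # hypothetical App Configuration endpoint
  target:
    configMapName: demo-service-config              # ConfigMap generated for the workload
  configuration:
    selectors:
      - keyFilter: '*'
        labelFilter: demo-service                   # the workload service's name as the label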

Inserting developer provided service secrets & configuration (black):

+
  1. Configuration values and links to key vault will be defined in the service repos, within an appConfig folder at the root (see the section on Defining developer provided service secrets & configuration).
  2. When the CI/CD pipeline is run to build and deploy the workload service, it will include these configuration files. The pipeline will provide an option to only push the configuration if needed.
  3. ADO Variable Groups will be used for the developers to define the secrets of their services which are not common or platform secrets (see ADO Variable Groups for Secrets).
  4. Secrets will be retrieved from the environment business service's ADO Variable Group, filtering on secrets which match the workload service name. This results in only the required secrets for the selected service being pushed into key vault.
     • Any new secret in the key vault should have RBAC permissions assigned to allow the workload service's workload identity to get the secret, and to allow the technical lead group of the business service to get & view the secret.
  5. Compile the appConfig files into a usable state (token replacement, etc.) and push the transformed appConfig into App Configuration within the chosen environment.
  6. Restart or deploy the affected pods to allow the configuration to be used by the application.
+

Inserting common/ platform provided secrets & configuration for services to use (purple):

+
  1. Inside ADP Infra Core, common platform configuration and links to key-vaulted secrets will be defined as parameters within the App Configuration's Bicep params template.
  2. It will be deployed to the App Configuration via the Infra Core pipeline in the same way any other infrastructure would be. These configurations will be assigned the label 'common'.
  3. In order to get common secrets into key vault, the ADP Platform team will need to input them manually into each environment's key vault when necessary.
+

Defining developer provided service secrets & configuration

+

FCP ADP Architecture_2023-10-10_12-21-35.png

+

0.1 - 1st pass

+
 [
+        {
+            "key": "Logging:LogLevel:Debug",
+            "value": "true",
+            "label": "{{serviceName}}",
+            "content_type": "",
+            "tags": {}
+        },
+        {
+            "key": "exampleConfig",
+            "value": "true",
+            "label": "{{serviceName}}",
+            "content_type": "",
+            "tags": {}
+        },
+        {
+            "key": "exampleConfig2",
+            "value": "Hello World! Welcome to {{serviceName}}, {{env}}!",
+            "label": "{{serviceName}}",
+            "content_type": "",
+            "tags": {}
+        },
+        {
+            "key": "exampleSecret",
+            "value": "{\"{{resourceid}}/{{servicename}}-examplesecret\"}",
+            "label": "{{serviceName}}",
+            "content_type": "{{keyVaultContentType}}",
+            "tags": {}
+        }
+]
+
+

1.0 - Ideal solution

+

Example appConfig.yaml

+
- key: Logging:LogLevel:Debug
+  value: "true"
+- key: exampleConfig
+  value: "true" 
+- key: exampleConfig2
+  value: Hello World! Welcome to {{serviceName}}, {{env}}!
+- key: exampleSecret
+  value: "{{servicename}}-examplesecret"
+  type: "keyvault"
+
+

Example appConfig.dev.yaml

+
- key: exampleConfig3
+  value: My dev secret
+
+

Example appConfig.test.yaml

+
- key: exampleConfig3
+  value: My test secret
+
+

ADO Variable Groups for Secrets

+

ADO Variable Groups will be created per environment per business service (namespace) and will be created automatically as part of the business service's setup.

+

This will be used for each service's secrets, following the format {program}-{project}-{env}. For example:

+
  • ffc-grants-snd1
  • ffc-grants-dev
  • ffc-grants-test
  • ffc-grants-pre
  • ffc-grants-prd
+

Secrets within these variable groups will need to follow the naming convention {program}-{project}-{service}-{var name}. For example:

+
  • ffc-grants-frontend-apikey1
  • ffc-grants-frontend-mysecret
+

Developer Configuration

diff --git a/Platform-Architecture/architecture-overview/index.html b/Platform-Architecture/architecture-overview/index.html
new file mode 100644

Architecture Overview - DEFRA - ADP Documentation

Architecture Overview

+

Who is this for:

+
    +
  • Platform Team
  • +
  • Delivery Project Tech Leads
  • +
  • Delivery Project Architects
  • +
diff --git a/Platform-Architecture/environments/index.html b/Platform-Architecture/environments/index.html
new file mode 100644

Environments in ADP - DEFRA - ADP Documentation
+

TODO

+

This page is a work in progress and will be updated in due course. Needs environments updated.

+
+

Environments in ADP

+

The table below details the environments the Platform supports, their purposes, and whether they're mandatory for going live / on the RTL path.

+
    +
  • 1-1 mapping between Services/Tenants Environments & Azure Subscriptions
  • +
  • The number at the end of the environment code designates the Subscription/Environment number for CCoE reference.
  • +
  • A Tenant/Customer/Service means a 'customer or team using the platform' and not the Platform team itself.
  • +
  • Infrastructure-Dev is only for Platform Engineers.
  • +
+

Core ADP Environments for Tenants & Infrastructure teams

Principal Environment Name | Use case | Route-to-live | Azure Environment Code/Subscription | Additional Information | Azure Tenant
Tenant-Production | Live Services, Public & Private beta. | Yes | AZR-ADP-PRD1 | | Defra
Tenant-Pre-Production | Automated Acceptance testing prior to production. | Yes | AZR-ADP-PRE1 | VPN Required. | Defra
Tenant-Demo | External demonstrations, PEN tests etc. | No | AZR-ADP-TST2 | VPN Required. | Defra
Tenant-Test | General testing, performance testing, exploratory testing | No | AZR-ADP-TST1 | Intended for demos to external and internal stakeholders | Defra
Tenant-Development | Development | Yes | AZR-ADP-DEV1 | VPN Required. | Defra
Tenant-Sandpit | Pre-merge automated tests, Pull Request checks etc. | No | AZR-ADP-SND4 | VPN Required. | Defra
Infrastructure-Dev | Testing infrastructure changes, proof of concepts, experimentations. Platform Team Only. | No/N/A | AZD-ADP-SND1, SND2, SND3 | VPN Required. | 0365_DefraDev
+

Shared Services and Management Subscriptions

Principal Environment Name | Use case | Route-to-live | Azure Environment Code/Subscription | Additional Information | Azure Tenant
Shared Services 3 | Management and Shared Services - Test and all environments below. POC/development area. | No/N/A | AZD-ADP-SSV3 | DefraDev Shared Services/management | 0365_DefraDev
Shared Services 5 | Management and Shared Tests - Production and all environments below. Live services. | No/N/A | AZR-ADP-SSV5 | Contains live ACR. Live shared services/management. | Defra
+

The Subscriptions that map to the environments documentation can be found here.

+

Route to live overview

+

For all Service teams, the defined minimum 'route to live' path is:

+
  • Dev > Pre-Prod > Production (Live). All other environments are optional: SND, TST, DMO etc.
+

This means that Service Teams must have passed automated checks/smoke tests in the Pre-prod environment, and any initial merge and validation checks in Development, before going Live. All other environments are available on-demand for teams. We may add additional environments in the future if needed, such as a dedicated ITHC/PEN Test area (or Demo can be used), but again these would be optional.

+

Note
When deploying from a previous image/artefact already deployed to an environment, no CI/build is required. Environments are selectable.

diff --git a/Platform-Architecture/integration-patterns/dynamics-and-platform-platform/index.html b/Platform-Architecture/integration-patterns/dynamics-and-platform-platform/index.html
new file mode 100644

Dynamics 365 & Power Platform Integration Patterns - DEFRA - ADP Documentation

Dynamics & Power Platform

+ +

Dynamics 365 & Power Platform Integration Patterns

+

A key feature for tenant teams is the integration with Microsoft Dynamics 365 and Power Platform. When integrating with these components, Service Principals are used to generate Bearer tokens to authenticate to the instance. This document describes the best practices, networking routes, and pros and cons of different solutions.

+

Dynamics 365

+

Azure Tenancy Alignment

+

The Platform broadly aligns with the Defra Azure tenancies that the Microsoft SaaS products are hosted in. Namely, Defra and DefraDev. This reduces/removes the cross-tenant identity requirements and enables the use of Azure-managed identities with Microsoft Platform-managed secrets. A key requirement was to align identity solutions and ensure optimal automation.

+

Authentication to Dynamics

+

To connect to Azure services, the best practice approach is to use Azure Managed Identities. This can be used to connect to almost all Azure PaaS Services, as well as SaaS products such as Dynamics 365. This uses the industry standard OAuth 2 protocols - https://auth0.com/docs/get-started/authentication-and-authorization-flow/client-credentials-flow

+
    +
  • ADP Recommend and promote only using Azure Managed Identities to connect to Dynamics 365 with OAuth 2.
  • +
+

Why do we recommend this?

+

Automation and security

+
    +
  • Managed Identities are strongly preferred/required over service principals (app regs) and directly solve a key issue in Defra around Secret and Credential management and renewals. No Client Secret is required and no separate components are deployed.
  • +
  • If not using an Azure MI, App Registrations must be created in Entra ID, adding to the automation and integration logic required to support, run and maintain. This would still be using OAuth2.
  • +
  • App Reg Secrets expire every 1 year - and require an automated (or manual at worst) process to ensure these are kept up to date. An automated background process must be in place for App Registrations. With an MI, this process is managed by Microsoft securely.
  • +
  • Managed identities directly solve this issue entirely within Azure as the Microsoft Platform itself automatically manages and renews the credentials without any user or service impact. The process is transparent and happens 6-monthly. No credentials are required to be stored anywhere.
  • +
  • You don't need to store additional secrets and credentials, enhancing security presence.
  • +
+

ADP Processes

+
    +
  • Within ADP, a Workload Identity (Azure MI) is automatically created for each service out of the box on deployment. This same Identity can be used within Dynamics as an 'application user' to integrate with 0365. No additional deployment is required in Azure.
  • +
  • This removes the requirement for additional components (App Reg's) to be created and the same identity can be used for other scenarios. A dedicated MI can also be created, when required.
  • +
  • No credential management or storage is required with the ADP recommended approach. Development teams can use NodeJS or C# SDKs to develop their applications and use this functionality easily with support from Microsoft / community.
  • +
  • Note: The application user (the MI/WI) must be added to Dynamics with the relevant security profile and permissions. Reference articles below detail this fully, including any relevant SDKs.
  • +
+

Example reference articles: https://blog.yannickreekmans.be/secretless-applications-use-azure-identity-sdk-to-access-data-with-a-managed-identity/ and https://www.eugenevanstaden.com/blog/d365-managed-identity/
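To tie this to the cluster, with AKS workload identity the workload's pods obtain the managed identity via an annotated ServiceAccount. Below is a minimal sketch, assuming workload identity is enabled on the cluster; the client ID, names and image are hypothetical:

apiVersion: v1
kind: ServiceAccount
metadata:
  name: demo-service
  namespace: demo-team
  annotations:
    azure.workload.identity/client-id: 00000000-0000-0000-0000-000000000000   # the MI's client ID
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: demo-service
  namespace: demo-team
spec:
  selector:
    matchLabels:
      app: demo-service
  template:
    metadata:
      labels:
        app: demo-service
        azure.workload.identity/use: "true"   # opt-in label so the identity token is injected
    spec:
      serviceAccountName: demo-service
      containers:
        - name: app
          image: example.azurecr.io/demo-service:latest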

+

Using an MI to connect to Dataverse https://community.dynamics.com/blogs/post/?postid=09f639ba-5134-4bd1-8812-04e019b7b920

+

A deeper understanding of App Regs, OAuth and connectivity can be found in these reference articles: https://learn.microsoft.com/en-us/power-apps/developer/data-platform/authenticate-oauth#app-registration

+

https://www.vodovnik.com/2023/01/12/interacting-with-dataverse-data-from-azure-c/

+

Networking

+

When integrating with SaaS products, there are networking considerations around network security and ingress/egress charges. When working with Azure PaaS and SaaS services, a number of options may be available to you depending on the pattern.

+

Virtual network integration ** DRAFT **

+

Microsoft are introducing a number of enhancements to secure your applications running in Azure with SaaS products. One example is https://learn.microsoft.com/en-us/data-integration/vnet/data-gateway-power-platform-dataflows

+

These tools allow you to securely connect Azure services to products like Power Platform and Dynamics, all within your own VNet without any public internet exposure, and at lower cost.

+

ADP is building a future pattern around these scenarios; it will be fully detailed here shortly. As the ADP Cluster is within an Azure VNET, VNet integration is required for secure/non-public connectivity. Alternatively, whitelisting via the ADP Front Door can be used to secure integrations. A pattern is being developed for this approach.

+

Power Platform

+

Testing and Quality Assurance ** DRAFT **

+

TBC -

diff --git a/Platform-Architecture/integration-patterns/overview/index.html b/Platform-Architecture/integration-patterns/overview/index.html
new file mode 100644

Integration Patterns Overview - DEFRA - ADP Documentation

Overview

+ +

The integration patterns that can be utilized in ADP will be defined here. These may include internal and external patterns, including with Azure services, SaaS products, and third-party services.

+

A number of services are in scope and/or have defined patterns, including:

+
  1. Azure Integration Services (PaaS services, APIs, etc.)
  2. Microsoft Dynamics 365 and Power Platform (SaaS products)
  3. On-premise / third party (e.g. RPA, Crown Hosting, etc.)
  4. External Integration Products (e.g. Dell Boomi, MuleSoft, etc.)
diff --git a/Platform-Architecture/permissions-model/index.html b/Platform-Architecture/permissions-model/index.html
new file mode 100644

ADP Permissions Model - DEFRA - ADP Documentation

ADP Permissions Model

+

This page contains an overview of the roles and permissions within ADP (Azure Development Platform). It outlines the different roles, such as Platform User, Technical Team Member, Delivery Team Admin, Delivery Programme Admin, and ADP Admin, along with their respective descriptions and responsibilities. It explains the permissions associated with each role in the ADP Portal, Azure DevOps, and GitHub, and describes how permissions are stored in a database and in Azure AD using AAD groups. Users are assigned to specific groups based on their roles, granting them the necessary permissions in the ADP Portal, GitHub, Azure, and Azure DevOps.

+

ADP Roles

+

The table below details the roles in the Platform, their scope and description:

Role | Scope | Description
Platform User | Platform | A user of the ADP Platform, who has access to the ADP Portal and can be a member of a Delivery Project or Programme. To do this, they must have a Cloud or DefraGovUK Account.
Technical Team Member | Delivery Project | Tech Lead, Tester, Developer, or Architect on the Delivery Project team.
Delivery Team Member | Delivery Project | Member of the Delivery Project team.
Delivery Team Admin | Delivery Project | Tech lead and/or Delivery Manager for the Delivery Project team.
Delivery Programme Admin | Delivery Programme | Administers Delivery Programmes within the ADP Portal.
ADP Admin | Platform | ADP Platform Engineering delivery team member.
CCoE Engineer | Organization | Cloud Center of Excellence engineer.
ADP Service Account | Platform | Service account used by automation within ADP.
+
+

Info

+

Please note: if a user holds multiple roles, they will receive the combined permissions associated with all their roles. This ensures that they have access to all the rights and privileges granted by the most significant role they possess. Essentially, the role with the highest level of permissions takes precedence.

+
+

Portal Permissions

+

The permissions for the portal are stored both in a database and in Azure AD with the use of AAD groups. The group assignments and naming convention are as follows:

+
    +
  • Delivery Non-Technical Team Members are assigned to the AAG-Users-ADP-{programme}-{delivery project}_NonTechUser AAD group.
  • Technical Team Members are assigned to the AAG-Users-ADP-{programme}-{delivery project}_TechUser AAD group.
  • Delivery Team Admins are assigned to the AAG-Users-ADP-{programme}-{delivery project}_Admin AAD group.
  • Delivery Programme Admins are assigned to the AAG-Users-ADP-{programme}_Admin AAD group.
  • ADP Admins are assigned to the AAG-User-ADP-PlatformEngineers AAD group.
+

By being added to these groups in Azure AD via the ADP Portal, users will be granted the permissions for their role in the ADP Portal.

+

The permissions for each role in the ADP Portal are detailed below.

+

Platform User

+

ADP Portal Permissions for the Platform User role:

+
    +
  • Access to the ADP Portal.
  • +
  • Can be selected as a Delivery Project team member/admin or Delivery Programme Admin.
  • +
  • Read access to all ALBs, delivery projects, programmes, etc.
  • +
+

Delivery Project: Team Member

+

ADP Portal Permissions for the Delivery Project Team Member role:

+
    +
  • Includes all Platform User permissions.
  • +
  • Displayed as a Member of assigned Delivery Project teams.
  • +
+

Delivery Project: Technical Team Member

+

ADP Portal Permissions for the Technical Team Member role:

+
    +
  • Includes all Delivery Project Team Member permissions.
  • +
  • Scaffold/create new services for their delivery project (inc. repos).
  • +
+

Delivery Project: Team Admin

+

ADP Portal Permissions for the Delivery Team Admin role:

+
    +
  • Includes all Delivery Team Member permissions.
  • +
  • Has the ability to invite/add users to their Delivery Project team as Team Members, Technical Team Members, Team Admins, and Technical Team Admins via the ADP Portal. The function for adding a team member will add them to the required GitHub team, Azure DevOps project, and Azure AAD groups for Azure resource access, depending on the new team member's roles in the Delivery Project.
  • +
  • Edit delivery project details in the ADP Portal.
  • +
+

Delivery Programme Admin

+

ADP Portal Permissions for the Delivery Programme Admin role:

+
    +
  • Includes all Delivery Team Admin permissions for all Delivery Projects in the programme.
  • +
  • Can create new Delivery Projects in the programme.
  • +
  • Can edit programme details for programmes they administer in the ADP Portal.
  • +
  • Can invite/add other admins to the programmes they administer.
  • +
+

ADP Admin

+

ADP Portal Permissions for the ADP Admin role:
  • Full access to the ADP Portal; admin for all ALBs, delivery projects, programmes, etc.

+

GitHub Permissions

+

GitHub Permissions are assigned and managed using GitHub teams. The following GitHub teams are automatically assigned to each repository owned by a Delivery Project:

+
    +
  • ADP-{programme}-{delivery project}-Contributors GitHub team: contains all Delivery Project Technical Team Members
  • +
  • ADP-{programme}-{delivery project}-Admins GitHub team: contains users that have been assigned both the Technical Team Member & Delivery Team Admin role for the Delivery Project
  • +
  • ADP-Platform-Admins GitHub team: contains the ADP Admins.
  • +
+
+

Info

+

Please Note: Users that have not been assigned the Technical Team Member role for a Delivery Project will not be given any GitHub permissions. Delivery Programme Admins & Delivery Project Team Admins can use the ADP Portal to add and remove users from their GitHub teams via the add/remove user functionality.

+
+
+

Info

+

Please Note: By default all repositories are public and can be accessed by anyone. Anyone can clone, fork, and view the repository. However, only members of the GitHub team will be able to push changes to the repository.

+
+

Technical Team Member

+

Technical Team Members are given the following permissions in GitHub:

+
    +
  • Write access to Delivery Projects GitHub repositories, which will allow triage permissions plus read, clone and push to repositories.
  • +
+

Technical Team Member with Delivery Team Admin

+

Users that have been given both the Technical Team Member & Team Admin role within a Delivery Project are given the following permissions in GitHub:

+
    +
  • All permissions of a Technical Team Member.
  • +
  • Admin access to Delivery Projects GitHub repositories, which will allow full access to their repositories including sensitive and destructive actions.
  • +
+

ADP Admin

ADP Admins are given the following permissions in GitHub:

  • All permissions of a Technical Team Member with Delivery Team Admin.
  • Full access to all ADP repositories in the DEFRA GitHub organization.

Azure Permissions

TODO

TBC

For Azure permissions, we use AAD groups to give users the correct level of permissions. The key AAD groups for Azure permissions are as follows:

  • Technical Team Members are assigned to the AAG-Users-ADP-{programme}-{delivery project}_TechUser AAD group.
  • ADP Admins are assigned to the AAG-User-ADP-PlatformEngineers AAD group.

Info

Users with only the Delivery Team Admin, Delivery Programme Admin, or Delivery Team Member roles will not be given any Azure permissions. They can add, edit, or remove users from their delivery project's AAD groups via the add/remove user functionality in the ADP Portal.

Technical Team Member

Technical Team Members are given the following permissions in Azure:

  • ...

Spell out permissions for each group in each of the Azure resources, etc.

Should this be done here or in another page?

Resource group

Database

  • AAG-Azure-ADP-{programme}-{delivery project}-{environment}-PostgressDB_Reader
  • AAG-Azure-ADP-{programme}-{delivery project}-{environment}-PostgressDB_Writer

Azure DevOps Permissions

TODO

TBC

ADP-ALB-ProgrammeName-DeliveryProjectName-Contributors - For Technical Team Members (write access level to the repo)

Sonar Cloud Permissions

ADP uses Technical Team Members' GitHub accounts to assign permissions in SonarCloud. Assuming that a GitHub account has been added to DEFRA's SonarCloud organisation, ADP will assign it to the relevant Delivery Project's SonarCloud group when the user is added to a Delivery Project in the ADP Portal. This gives them access to perform the required actions for their Delivery Project within SonarCloud.

Info

By default, all SonarCloud projects are public and can be accessed by anyone in read-only mode.

The ADP Portal creates a SonarCloud user group and permissions template per Delivery Project on creation, using the {Delivery Project Team name} as the group's name. This group filters SonarCloud projects by the Delivery Project's ADP namespace or alias fields. For example, if project FCP ACD has an ADP namespace of fcp-acd and an alias of ffc-acd, the group will have permissions on SonarCloud projects starting with fcp-acd* or ffc-acd* (ffc-acd-frontend, fcp-acd-backend, etc.).

Warning

SonarCloud projects that do not include the delivery project's ADP namespace or alias in the project name will not be included in the group permissions. A SonarCloud Organisation Admin will need to add the service to the group permissions manually.

Technical Team Member

Each Technical Team Member will be added to the SonarCloud user group for the Delivery Project they are a member of. The group's permissions for each service (SonarCloud project) are as follows:

  • Administer Issues: change the type and severity of issues, resolve issues as "fixed", "accepted" or "false-positive" (users also need "Browse" permission).
  • Administer Security Hotspots: resolve a Security Hotspot as reviewed (fixed or safe), or reset it to "to review" (users also need "Browse" permission).

ADP Admin

ADP Admins can see all services (SonarCloud projects) created by ADP's automation in SonarCloud. The ADP user group has the following permissions at the SonarCloud project level:

  • Administer Issues: change the type and severity of issues, resolve issues as "fixed", "accepted" or "false-positive" (users also need "Browse" permission).
  • Administer Security Hotspots: resolve a Security Hotspot as reviewed (fixed or safe), or reset it to "to review" (users also need "Browse" permission).
  • Administer: access project settings and perform administration tasks (users also need "Browse" permission).
  • Execute Analysis: get all settings required to perform an analysis (including secured settings like passwords) and push analysis results to the SonarCloud server.

ADP Service Account & ADP SonarCloud Automation

ADP requires these permissions in order to perform API administration tasks in SonarCloud at the organisation level. They are required to create the user groups and permissions templates, and to add users to the permissions templates. The permissions are as follows:

  • Administer: allows you to perform any action on both Quality Profiles and Quality Gates.
  • Execute Analysis: allows you to trigger an analysis and push analysis results to the SonarCloud server.
  • Create Project: allows you to initialize a project and configure its settings before the first analysis is performed.
  • Administer Organization: allows you to perform all administrative functions for an organization.

Details of SonarCloud permissions.

Current known SonarCloud Web API actions:

  • Create User Group - create a group. Requires the 'Administer System' permission.
  • Search for User - search for users. Requires the 'Administer System' permission.
  • Add User to User Group - add a user to a group; 'id' or 'name' must be provided. Requires the 'Administer System' permission.
  • Create Permissions Template - create a permission template. Requires the 'Administer' permission on the organization.
  • Update Permissions Template - update a permission template. Requires the 'Administer' permission on the organization.
  • Add User Group to Permission Template - add a group to a permission template. Requires the 'Administer' permission on the organization. This grants the codeviewer, issueadmin, securityhotspotadmin, scan, and user permissions to the group added to the permissions template.
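To make the actions above concrete, the sketch below shows roughly how they could be scripted from an Azure Pipelines step. It is a minimal illustration, not ADP's actual automation: the organisation key, the group/template names, and the sonarCloudToken variable are all assumptions.

# Illustrative only - scripted versions of the Web API actions listed above.
steps:
  - script: |
      # Create the Delivery Project's SonarCloud user group
      curl -su "${SONAR_TOKEN}:" -X POST \
        "https://sonarcloud.io/api/user_groups/create" \
        -d "organization=defra" \
        -d "name=FCP Demo Team"

      # Grant the documented permissions to the group via the permissions template
      for perm in codeviewer issueadmin securityhotspotadmin scan user; do
        curl -su "${SONAR_TOKEN}:" -X POST \
          "https://sonarcloud.io/api/permissions/add_group_to_template" \
          -d "organization=defra" \
          -d "templateName=FCP Demo Team" \
          -d "groupName=FCP Demo Team" \
          -d "permission=${perm}"
      done
    displayName: Create SonarCloud group and assign template permissions (sketch)
    env:
      SONAR_TOKEN: $(sonarCloudToken)   # hypothetical secret pipeline variable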

Info

It is not possible to add new users directly to the SonarCloud organisation via the API. Users will need to be added manually by a SonarCloud Organisation Admin, or member synchronization can be enabled for the DEFRA GitHub organisation.

TODO

This page is a work in progress and will be updated in due course.

Azure AI Services

This article details the AI Services Architecture for the solution at a high level.

AI Services supported by ADP:

  • Azure Open AI
  • Azure AI Search

Warning

Please ensure you follow DEFRA's guidelines and policies when using AI services. This includes the use of data and the use of AI services in general, in order to ensure your delivery project is using AI responsibly.

Azure Open AI

Azure OpenAI Service provides REST API access to OpenAI's powerful language models, including the GPT-4, GPT-4 Turbo with Vision, GPT-3.5-Turbo, and Embeddings model series. In addition, the new GPT-4 and GPT-3.5-Turbo model series have now reached general availability. These models can be easily adapted to your specific task, including but not limited to content generation, summarization, image understanding, semantic search, and natural language to code translation.

Deployed & Supported Models

The Azure Development Platform (ADP) supports a range of models within Azure OpenAI. However, the availability of these models is limited to those supported by Azure in the UK South region. The following table lists the supported models:

Model | Deployment | Quota | Description
gpt-4 | gpt-4 | 80k | An improvement on GPT-3.5, capable of understanding and generating both natural language and code.
gpt-35-turbo | gpt-35-turbo | 350k | An improvement on GPT-3, capable of understanding and generating both natural language and code.
text-embedding-ada-002 | text-embedding-ada-002 | 350k | Converts text into numerical vector form to facilitate text similarity comparisons.
text-embedding-3-large | text-embedding-3-large | 350k | The latest and most capable model for converting text into numerical vector form for text similarity comparisons. Please note that upgrading between different embedding models is not supported.

Warning

All Delivery Projects should be mindful of the quota restrictions for each model per subscription/region. These models are shared resources among all Delivery Projects. If more quota is needed, ADP can make a request; however, any increase in quota is subject to Microsoft's approval.

Architecture

ai-services-architecture

Within the ADP Platform, Azure Open AI Services are deployed behind Azure API Management (APIM) to provide a secure and scalable API endpoint for the AI services. APIM is used to manage the API lifecycle, provide security, and monitor API usage. APIM is deployed in a subnet (/29) and uses a private link to connect to the Azure OpenAI service. Additionally, a developer portal is deployed with APIM, offering self-service API documentation for the AI services. Between the delivery project's service and APIM, a private link will be implemented, and APIM will use the service's managed identity with the Cognitive Services OpenAI User role assigned. This allows APIM to access the AI services on behalf of the delivery project's service privately and securely.

For any other Azure services that require access to the AI services, they will need to utilize the APIM endpoint and the managed identity.

Iterative Deployment

ai-services-0.1-architecture

To meet the timelines and requirements of the delivery projects, our initial step will be to deploy the Azure OpenAI Service. We will provide the AKS cluster and Azure AI Search with direct access to this service over a private endpoint, assigning the role of Cognitive Services OpenAI User.

This approach will enable the delivery projects to begin utilizing the AI services and provide valuable feedback. Once the APIM is deployed, we will transition the AI services to APIM. This will establish a secure and scalable API endpoint for the AI services.

Note

For local development purposes, the delivery projects can directly use the Azure Open AI services in the SND environment only, provided they are connected to the DEFRA VPN or using a DEFRA laptop. This setup will facilitate testing of the AI services and the provision of feedback.

Developer Access

For local development, developers will be able to access the SND environment only, via the DEFRA VPN or a DEFRA laptop, with the assigned role of Cognitive Services OpenAI User. This gives developers the ability to test Azure Open AI services locally via APIM and to view model deployments of the deployed Azure Open AI service.

For Dev plus environments, developers will be able to access the service only via the DEFRA VPN or a DEFRA laptop, with the assigned role of Cognitive Services OpenAI User. This will currently allow them to:

  • View the resource in the Azure Portal
  • View the resource endpoint under "Keys and Endpoint", but not the keys.
  • View the resource and associated model deployments in Azure OpenAI Studio
  • View available models for deployment in Azure OpenAI Studio
  • Use the Chat, Completions, and DALL-E (preview) playground experiences with any models that have already been deployed to this Azure OpenAI resource.

Note

In Dev plus environments, APIM endpoints will not be exposed for local developer access. They will remain accessible only to ADP Azure services or authenticated Delivery Project services.

Monitoring

APIM is used to enhance the monitoring of OpenAI usage per service. You can utilise an OpenAI Emit Token Metric policy to send metrics to Application Insights about consumption of large language model tokens through Azure OpenAI Service APIs. Token count metrics include Total Tokens, Prompt Tokens, and Completion Tokens, with additional dimensions such as the User ID and API ID. By leveraging these metrics, ADP can monitor usage per service. This information can be utilized by both the project delivery teams and ADP to infer usage, potentially request additional quota if required, and for billing purposes.

To monitor Azure Open AI directly, we enable the Azure OpenAI Service Diagnostic Logs. This allows us to monitor the usage of the AI services directly. The log data is stored in Azure Monitor (Log Analytics Workspace), where both ADP and the delivery projects can gain insights such as:

  • Azure OpenAI Requests: total number of calls made to the Azure OpenAI API over a period of time.
  • Generated Completion Tokens: number of generated tokens (output) from an Azure OpenAI model.
  • Processed Inference Tokens: number of inference tokens processed by an Azure OpenAI model, calculated as prompt tokens (input) + generated tokens.

Quota & Token Management

The Azure Open AI services are shared between delivery projects and have a quota limit per subscription/region. The quota limit is shared between the delivery projects and can be increased if required. The quota limit will be monitored by the ADP team and increased if required, subject to approval from Microsoft.

Given the current needs and requirements of the ADP platform, we have opted for the pay-as-you-go pricing model for the Azure Open AI services. This allows the delivery projects to pay only for what they use, eliminating concerns about overcharging. However, this does mean that there is a Tokens-per-Minute limit per model for the Azure Open AI services. Delivery projects need to be aware of this limit when using the AI services.

To manage this and ensure efficient use of Azure Open AI, APIM will provide policy enforcement to manage the quota limit and offer a better experience to the delivery projects when the quota limit is reached:

  • Retry policy: when the quota limit is reached, Azure OpenAI will return a 429 status code to the delivery project's service. APIM will implement a retry policy to wait for a certain amount of time before retrying the request to the Azure Open AI service. This allows the delivery project's service to wait for the quota limit to reset before the request is retried.
  • Token limit policy: by relying on token usage metrics returned from the OpenAI endpoint, the policy can accurately monitor and enforce limits in real time. The policy also enables precalculation of prompt tokens by API Management, minimizing unnecessary requests to the OpenAI backend if the limit is already exceeded.

Possible Future Enhancements

Semantic caching for Azure OpenAI APIs in APIM

Enable semantic caching of responses to Azure OpenAI API requests to reduce bandwidth and processing requirements imposed on the backend APIs and lower latency perceived by API consumers. With semantic caching, you can return cached responses for identical prompts and also for prompts that are similar in meaning, even if the text isn't the same.

More Azure OpenAI Service Model Deployments

Currently, the range of models that can be deployed is limited to those supported by Azure in the UK South region. In the future, we will look to provide a broader selection of models for delivery projects to utilise. This includes potential support for additional models such as Whisper, DALL-E, and GPT-4o.

Reserved Capacity for Azure OpenAI Service

Reserved capacity, also known as Provisioned Throughput Units (PTU), is a feature of Azure OpenAI. The newer offering, PTU-M (Managed), abstracts away the backend compute, pooling resources. Beyond the default TPM limits described above, PTUs define the model processing capacity, using reserved resources for processing prompts and generating completions.

PTUs are purchased as a monthly commitment with an auto-renewal option, which reserves Azure OpenAI capacity within an Azure subscription, using a specific model, in a specific Azure region. TPM and PTU can be used together to provide scaling within a single region.

Please note, PTU minimums are very expensive. This requires ADP to be at a certain scale to justify the cost across its Delivery Projects.

Key Resources


Outstanding Questions

  • ???

Azure AI Search

Azure AI Search (formerly known as "Azure Cognitive Search") provides secure information retrieval at scale over user-owned content in traditional and generative AI search applications.

Information retrieval is a foundational aspect of any application that surfaces text and vectors. Common use cases include catalog or document search, data exploration, and increasingly, chat-style applications over proprietary grounding data. When creating a search service, you'll work with the following capabilities:

  • A search engine for vector search and full text and hybrid search over a search index
  • Rich indexing with integrated data chunking and vectorization (preview), lexical analysis for text, and optional applied AI for content extraction and transformation
  • Rich query syntax for vector queries, text search, hybrid queries, fuzzy search, autocomplete, geo-search and others
  • Azure scale, security, and reach
  • Azure integration at the data layer, machine learning layer, Azure AI services and Azure OpenAI

ADP provides a managed Azure AI Search service for delivery projects to use, which is scalable and secure using best practice. The core components (indexes, datastores, etc.) of the Azure AI Search will be independently deployable and can be created by delivery projects as required on a self-service basis.

Supported Features

Warning

Features that require external services will be limited to what has already been previously requested by Delivery Projects. This normally affects supported data sources and skillsets. If a Delivery Project requires a new data source or skillset, they will need to request this from ADP; it will be reviewed and approved if it is required and does not affect the current delivery projects.

Supported features of Azure AI Search:

  • Indexes: an index is a persistent store of documents that are searchable using Azure AI Search. An index is similar to a database table, but it contains a schema that describes the structure of the documents within the index. An index can contain one or more fields, each of which has a name and a data type. An index can also contain a scoring profile, which is used to rank search results based on relevance. An index can be created, updated, and deleted using the Azure AI Search REST API or the Azure Portal.
  • Indexers: indexers are used to extract content from an external data source and populate an index with it. Indexers can be scheduled to run at regular intervals to keep the index up-to-date.
  • Data Sources: a data source is a connection to an external data store that contains the content you want to index. Data sources can be used to connect to a variety of data stores, including Azure Blob Storage, Azure Cosmos DB, Azure SQL Database, and more.
      • Azure Blob Storage: a cloud-based storage service that allows you to store large amounts of unstructured data, such as text files, images, and videos. Azure Blob Storage can be used as a data source for Azure AI Search to index content from text files, images, and videos.
  • Skillsets: a skillset is a collection of cognitive skills that can be used to enrich content during indexing. A skillset can contain one or more skills, each of which performs a specific task, such as extracting text from images or translating text to another language. A skillset can be used to extract information from unstructured data, such as images, videos, and documents, and make it searchable using Azure AI Search. Supported skillsets include:
      • Azure Open AI Embedding Skillset: this skillset uses the Azure Open AI Embedding model to convert text into numerical vector form to facilitate text similarity. It can be used to enhance the search experience by providing more relevant search results based on the semantic meaning of the text.

Architecture

ai-services-architecture

ADP has selected a Standard SKU for the Azure AI Search service, as it provides a cost-effective balance of storage and query capacity for the delivery projects. Azure AI Search is a shared service between the ADP Delivery Projects, allowing up to 50 indexes and 50 indexers in total, 35 GB of storage per partition, and 160 GB of storage with two replicas, requiring two search units per environment. This will allow 99.9% availability for read operations.

Note

The ADP Team can increase the tier and the number of search units (replicas and partitions) as required. Under the current scope of the delivery projects, the Standard SKU with two search units is sufficient, allowing for 99.9% availability for read operations. If a project requires 99.9% availability for read/write operations, additional search units can be added.

Azure AI Search will not be reachable from the public internet and will only be accessible via a private link from the DEFRA VPN or DEFRA laptops, or by consuming Azure/Delivery Project services via a private endpoint.

Delivery Project services will be given the role of Search Index Data Contributor, scoped to the indexes that the service requires. This allows read-write access to the content of these indexes.

Azure AI Search will need access to Azure Open AI embedding models to allow for semantic search in the search indexes and for use in its skillsets. No direct access to the Azure Open AI services will be allowed; they will only be accessible via the Azure API Management endpoint. To ensure that Azure AI Search has efficient access, the role of Cognitive Services OpenAI User will be assigned to the Azure AI Search service's system-assigned managed identity. This will allow the Azure AI Search service to access the Azure Open AI services via the APIM endpoint over a private link securely and efficiently.

Developer Access

For SND environments, developers will be able to access the Azure AI Search service via the Azure Portal with the assigned role of Reader across the whole of the AI Search service, as well as the role of Search Index Data Contributor scoped to the indexes that are created for their Delivery Project. This allows read-write access to the content of these indexes, and also to import, refresh, or query the documents collection of an index. It facilitates local development and testing of the Azure AI Search service.

For Dev plus environments, developers will be able to access the Azure AI Search service via the Azure Portal with the assigned role of Reader. This role currently enables them to read across the entire service, including search metrics, content metrics (storage consumed, number of objects), and the object definitions of data plane resources (indexes, indexers, and so on). However, they won't have access to read API keys or content within indexes, thereby ensuring the security of the data control plane.

Developers in all environments will be able to access Azure AI Search only via the DEFRA VPN or a DEFRA laptop, with the restrictions detailed above.

Monitoring

Note

Azure AI Search doesn't monitor individual user access to content on the search service. If you require this level of monitoring, you need to implement it in your client application.

The diagnostic logs for Azure AI Search are stored in Azure Monitor (Log Analytics Workspace). This allows ADP and the delivery projects to gain insights into aspects such as latency, errors, and usage of the search service.

The Azure AI Search service will be deployed as a common service for use by all Delivery Projects. For self-service creation and updating of the Azure AI Search components, developers will be able to use ADP PowerShell scripts and a JSON definition file. These components will be created within the X repository, ensuring that the components are consistently created across all projects and environments using Azure Pipelines.

Token replacement

The JSON definition files support token replacement: values such as connection strings can be supplied as #{TokenName}# tokens, which are replaced with the real values at deployment time. For example:
{
  "name": "MyDataSource",
  "description": "Example of a data source that uses a connection string with a token",
  "type": "sharepoint",
  "subtype": null,
  "credentials": {
    "connectionString": "#{MyDataSourceConnectionString}#"
  },
  "container": {
    "name": "useQuery",
    "query": "includeLibrary=...."
  },
  "dataChangeDetectionPolicy": null,
  "dataDeletionDetectionPolicy": null,
  "encryptionKey": null,
  "identity": null
}
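In the example above, the MyDataSourceConnectionString token would be supplied by the deployment pipeline at release time (for instance, from a secured variable group or a Key Vault-backed secret), so credentials never need to be committed to the repository alongside the JSON definition.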

Possible Future Enhancements

  • TBC

Key Resources


Outstanding Questions

  • Where is the best place for the deployment scripts for Azure AI Search components? Very unsure of how best to make this self-service for the delivery projects.
  • Which data sources to support?
  • Which skillsets to support? Custom skillsets?

TODO

This page is a work in progress and will be updated in due course.

Integration Services

This article details the Integration Services Architecture for the solution at a high level.

Integration Services supported by ADP:

  • Azure Service Bus
  • Azure API Management

Azure Service Bus


Azure API Management


Scaling


Scaling of the Platform

TODO

This page is a work in progress and will be updated in due course.

On the Platform, we're acutely aware that we'll be hosting a large variety of services with a wide range of requirements. We need to understand and define how we'll manage and scale, based on legitimate requirements and workload demands. Here, we'll cover this in terms of tenant applications, Azure infrastructure, storage capacity, service limits, pipelines, ADO projects, etc.

What do you scale?

The platform. But specifically:

  • Azure Kubernetes Service (AKS) - Cluster, Nodes, & Apps/Pods
  • Azure Storage Locations (Log Analytics, Storage accounts, etc.)
  • PostgreSQL Flexible Server - CPU, Memory & Storage
  • Azure Cosmos DB
  • Azure Front Door
  • App Gateways and Ingress Controllers
  • Deployment Pipelines
  • Azure PaaS (Key Vault, App Config, etc.)
  • Azure Redis
  • Azure Service Bus
  • Azure API Management

How do you scale?

We'll use a range of tools and features based on the component, which we'll detail below. We'll cover both our infrastructure and service scaling configuration, as well as how we scale Platform Teams, Tenants and Services appropriately (i.e. supporting tooling, pipelines, projects, repos, etc.).

Scaling Configurations


Azure Kubernetes Services


PostgreSQL Flexible Server


Tech Radar for ADP

The Tech Radar is a tool to inspire and support teams to pick the best technologies for new projects. It is a visualisation of the technologies that are in use and recommended by the majority of teams. The radar is split into 4 quadrants and 4 rings.

Tech Radar

Quadrants

Description

The Tech Radar should have 4 quadrants. Each entry in the technology stack is represented by a blip in a quadrant. The quadrants represent broad categories that entries fit into:

  • Languages & Frameworks
  • Tooling
  • Infrastructure
  • Observability

Rings

Description

The Tech Radar should have 4 rings, with the central ring representing entries that are in use and recommended by the majority of teams, whilst the outer ring represents entries that are not recommended and for which we recommend teams transition to a recommended entry.

  • Adopt
      • This technology is recommended for use by the majority of teams with a specific use case.
  • Trial
      • This technology has been evaluated for specific use cases and has shown clear benefits. Some teams adopt it in production, although it should be limited to low-impact projects as it might incur a higher risk.
  • Assess
      • This technology has the potential to be beneficial for the organisation. Some teams are evaluating it and using it in experimental projects. Using it in production comes with a high cost and risk due to lack of in-house knowledge, maintenance, and support.
  • Hold
      • We don't want to further invest in this technology, or we evaluated it and don't see it as beneficial for the organisation. Teams should not use it in new projects and should plan on migrating to a supported alternative if they use it for historical reasons. For broadly adopted technologies, the Radar should refer to a migration path to a supported alternative.

Entries FFC Technology Stack

The below entries are taken from the technology stack in the FFC-Development-Guide.

⚠️ Need to confirm whether any of the categories in the above linked technology stack would be appropriate Portal quadrants

Technology & Services Stack

Entry | Quadrant | Ring | Note
Node.js | Languages & Frameworks | Adopt |
Hapi.js | Languages & Frameworks | Adopt |
NPM | Languages & Frameworks | Adopt |
.Net | Languages & Frameworks | Adopt |
Python | Languages & Frameworks | Assess |
Nuget | Tooling | Adopt |
Docker | Tooling | Adopt |
Docker Compose | Tooling | Adopt |
Helm | Tooling | Adopt |
Bicep | Tooling | Adopt |
Azure CLI | Tooling | Adopt |
PowerShell | Tooling | Adopt |
Azure boards | Tooling | Adopt |
Jenkins | Tooling | Hold |
Azure pipelines | Tooling | Adopt |
Jest | Test Tooling | Adopt |
xUnit | Test Tooling | Assess |
nUnit | Test Tooling | Adopt |
Pact Broker | Test Tooling | Adopt |
Web Driver IO | Test Tooling | Adopt |
Cucumber | Test Tooling | Adopt |
Selenium | Test Tooling | Adopt |
BrowserStack | Test Tooling | Adopt |
JMeter | Test Tooling | Adopt |
Snyk | Test Tooling | Adopt |
OWASP Zap | Test Tooling | Adopt |
AXE? | Test Tooling | Adopt |
WAVE | Test Tooling | Adopt |
Anchor Engine | Test Tooling | Adopt |
Azure Kubernetes Service | Infrastructure | Adopt |
Flux CD | Infrastructure | Adopt |
Azure Service Operator | Infrastructure | Adopt |
Azure Container Registry | Infrastructure | Adopt |
Azure PostgreSQL (flexible Server) | Infrastructure | Adopt |
Azure Service Bus | Infrastructure | Adopt |
Azure Event Hubs | Infrastructure | Assess |
Azure App Configuration | Infrastructure | Adopt |
Azure Key Vault | Infrastructure | Adopt |
Azure Functions | Infrastructure | Hold | *must be containerized in AKS
Azure Storage | Infrastructure | Adopt |
Entra ID workload Identity | Infrastructure | Adopt |
Application Insights | Observability | Adopt |
Azure Repos | Tooling | Hold |
GitHub | Tooling | Adopt |
SonarCloud | Tooling | Adopt |
Docker Desktop | Tooling | Adopt |
Google Analytics | Observability | Adopt |
Prometheus | Observability | Adopt |
Grafana | Observability | Adopt |
Azure Monitor | Observability | Adopt |
Visual Studio 2022 | Tooling | Adopt |
Visual Studio Code | Tooling | Adopt |
App Reg's | Tooling | Adopt |
Azure CosmosDB (SQL) | Tooling | Adopt |
Azure CosmosDB (Mongo) | Tooling | Assess |
Azure AI Studio | Infrastructure | Assess |
Azure Machine Learning | Infrastructure | Assess |
Azure Cognitive Services | Infrastructure | Assess |
Azure AI Search | Infrastructure | Assess |
Azure Prompt Flow | Infrastructure | Assess |

Azure Developer Platform Strategy

Overall platform strategy for ADP.

TODO

This page is a work in progress and is being updated in due course.

Vision Statement: Build Applications; not infrastructure.


Mission Statement: To empower every product development team in Defra to increase the speed of delivery by providing a self-service and standardized Development Platform to host their business applications securely and efficiently.


Our Values:

  1. Innovation and evergreen delivery
  2. Standardization and compliance
  3. Full automation with 'Everything-As-Code'
  4. Security, scalability and efficiency
  5. Self-service & developer centricity
  6. Fully open and transparent
  7. Observability and monitorability

Our Motto:

Move fast, with stable infrastructure and standardized delivery

Why? By providing a stable infrastructure environment with common, tried and trusted delivery processes, we'll enable application teams to move faster and realize business value quicker.

Vision Board.png

Our reach is Defra Azure wide, and the Platform will be able to be used by any product development team across the Digital delivery programme. We will focus on hosting and running digital transactional business applications.

Enabling Delivery - What are our Goals and Objectives?

To achieve our product vision of "Build Apps - Not Infra", the following (initial) goals and objectives will need to be achieved as part of the Platform delivery.

Our product goals

  • "Product Teams are onboarded on day 1, in less than an hour, onto a Defra infrastructure/delivery Platform with the permissions configured based on their personas, so they can perform their role immediately."
  • "Product Teams will be able to self-service their needs by being able to immediately build, test, deploy, and run software in a standardised way that expedites delivery and follows/promotes Defra's development standards."
  • "Engineers will have full observability and monitoring across the estate to be able to run and maintain their applications, securely and efficiently - without having to ask."
  • "The Platform will have pre-formed 'Exemplar Services', common code libraries, SDKs, etc. that are able to be used by, built upon, and expanded by Product Teams, and that will demonstrate integration onto the Platform."
  • "The Platform's Build and Release processes will be on-demand, with all expected features and processes included out of the box for every development team."
  • "Everything will be in code, delivered fully by automation, tracked across its lifecycle, and versioned appropriately."
  • "The Platform will be continuously scalable, efficiently, to any number of product teams that are building transactional business services in Azure, without exponentially adding infrastructure and engineering resource costs."
  • "The Platform will provide the capability to view the entire estate in a single place. A business user or product team will be able to view a detailed catalogue of all services, documentation, libraries, etc. across all environments."

Documentation Approach

For ADP, there are going to be two data sources for documentation: one that can be external (ADP Documentation) and another that will be internal (ADP Documentation Internal); both will be contributed to on GitHub via a Git repository. Our approach is to follow GDS's Service Standard of "Make new source code open" for our documentation, making most of it open to the public and allowing it to be easily viewed by third parties. This makes it available for reuse under an open licence while still keeping ownership of the intellectual property, enabling cross-government collaboration and ease of support for existing and future projects within Defra. The minority of documentation which is classified as possibly sensitive information will be available via ADP Internal Documentation.

Diagram of our approach:

documentation-approach

Explanation:

  • Tech User - normally a developer, Tech Lead, or Solution Architect; will be able to access both internal and external documentation, either from the ADP Portal or from a GitHub Pages website. Tech users can view and extend the documentation by going directly to a GitHub repository and submitting a pull request.
  • Non-tech User - Project Manager, Business Analyst, etc.; will have access to internal and external documentation via the ADP Portal. They will still have access to the external documentation's GitHub Pages site, but not the internal documentation's GitHub page, as that requires a GitHub account in Defra's GitHub Organisation.
  • 3rd party - a member of the public, a potential Defra supplier, a member of a non-Defra government department, etc.; will have access to the external documentation via the GitHub Pages website.
  • ADP Team - will have access to all of the above, and will be the main contributors and approvers of internal/external documentation. The ADP team will add additional documentation to the README.md of the ADP GitHub repositories; on commit to the main branch, this will be copied over to the internal/external documentation repositories, reducing the number of places ADP customers need to look for documentation and increasing their productivity when learning and working on ADP.
  • Automated deployments - on commit to main for both internal and external documentation repositories, deployment pipelines will be run to deploy the documentation to GitHub Pages and to the ADP Portal's documentation store.

ADP Documentation (External)

Portal Link: https://portal.snd1.adp.defra.gov.uk/docs/default/component/adp-documentation

GitHub Pages: https://defra.github.io/adp-documentation (public)

GitHub Repository: https://github.com/DEFRA/adp-documentation

What will be stored here:
  • What is ADP? - introduction to ADP; pros, cons, limitations; is your Defra project right for ADP?
  • Getting Started - guides to enable ADP customers (users, projects, programmes) to get started quickly on ADP.
  • How to guides - guides on how to carry out development functions when developing on ADP, focused on the tech user.
  • Platform Strategy - details of the ADP Platform's strategies (current and future).
  • Migrate to ADP - step by step instructions on how to migrate your existing Defra project & services over to ADP.
  • Developer Reference - detailed reference material required to enable tech users to work with ADP. ADP repositories' READMEs will be copied here.
  • Platform Architecture - fairly high level details of the ADP Architecture; not enough detail that another Platform team could implement ADP elsewhere.

ADP Documentation Internal

Portal Link: https://portal.snd1.adp.defra.gov.uk/docs/default/component/adp-documentation-internal

GitHub Pages: https://defra.github.io/adp-documentation-internal/

GitHub Repository: https://github.com/DEFRA/adp-documentation-internal (private)

What will be stored here:
  • ADP Internal - internal information that should not be accessible to the public.
  • Internal Architecture Details - architecture information that should not be accessible to the public.
  • ADP Runbooks - step by step instructions that are required as part of the change management process in order to release to production.

Platform service Deployment Strategy


Guidance and Context

This article outlines the Platform service deployment strategies available. Development teams should read the Platform Versioning and Git Strategy document before reading this. ADP's primary deployment strategy is Rolling Deployments on AKS with HELM and FluxCD. This provides Platform services with a zero-downtime deployment strategy, allowing applications to achieve high availability with low/no business impact to live service. This is important for services that need 24/7 availability, and enables the capability to deploy to production multiple times a day. In the future, we will support other deployment strategies, such as Blue-Green and Canary deployments.

Deployment Strategies - ADP Rolling Updates

ADP uses AKS (Kubernetes) with HELM Charts and Flux to perform rolling deployments. The default strategy applied to all services is rolling deployments, unless otherwise specified in the deployment YAML. We recommend starting with this strategy. It allows applications to be incrementally updated without downtime.

There are 3 core parts to a Service deployment/upgrade, which are done in the following order:

  1. App Configuration, including Secrets
  2. Service Infrastructure
  3. Database upgrade and Web Application

The deployment process flow:

  1. A new deployment is triggered via the CI & CD Pipelines for the Service:
      1. New app Secrets are imported/updated/deleted* in the Key Vault; they are mastered in the Azure DevOps (ADO) Secret Library Groups for the service.
      2. New App Configuration keys and values are imported/updated/deleted in the Service Config Maps & App Configuration Service from the Service's 'appConfig.yaml' files. Note: the sentinel key is not updated yet.
  2. The new images and artefacts are pushed to the environment Container Registry (ACR) (via pipeline deployment) and Flux updates the Service's repository with the new version to be deployed:
      1. This can be a higher version (new image & release) or a lower version (existing/rollback).
  3. Flux reconciles the Cluster with the new Web App code and Infrastructure versions requested, using a rolling update. Any infrastructure updates take precedence over the Application (Infra > App).

     Application deployment:
      1. The deployment will incrementally add new Pods (web applications) onto the Nodes in the Cluster. These will automatically pick up the new App Config/Secret updates on startup.
      2. The AKS deployment will wait for those new Pods (apps) to start successfully, within the configured/default (5m) wait times and health check endpoints.
      3. Once the new Pods are started and reporting healthy via the endpoint(s), traffic will be directed to the new Pods (updated app) gracefully via the internal load balancer/NGINX.
      4. The old Pods (previous version) will be deleted incrementally once the new Pods have started successfully and all traffic has drained gracefully.
      5. If the new App/Pod does not start successfully, the deployment will time out and fail after a set period of health check retries (5m), but the previous app version (Pods) will remain in place and accepting traffic. The previous version's App Config will remain as-is/unchanged on the non-upgraded Pods. Unhealthy Pods will be removed if an upgrade fails.

     Infrastructure deployment:
      1. The new infrastructure will be deployed (created, updated, or deleted). This can be Queues, Topics, Datastores, Identities, etc.
      2. Once the infrastructure upgrade is successful, the App (and database, if applicable) can be deployed/upgraded.

     Database deployment:
      1. If a new DB Schema is to be deployed (migration required), this will be done before the Web Application is deployed.
      2. Liquibase will perform the PostgreSQL migration using a Flux pre-deploy job.
      3. If the database deployment/migration fails, the App will not be upgraded.
  4. If a user has requested the deployment of App Config/Secrets only, via the flag in the build.yaml, the App and Infra will not be deployed on this release:
      1. The App Config & Secrets will be updated via the Pipeline, including the sentinel key with the Build ID, which triggers the configuration update.
      2. The Reloader service will perform a rolling, zero-downtime upgrade (restarting Pods) of the Service so it consumes the new App configuration (incremental Pod restarts).
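Conceptually, the version that Flux reconciles is just a field in a HelmRelease-style object. The sketch below is illustrative only; the names, chart reference and namespace are assumptions, not the Platform's actual manifests:

# Hypothetical Flux HelmRelease: bumping the image tag is what triggers
# the rolling upgrade described above.
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: example-service            # hypothetical service
  namespace: example-team          # each team has its own namespace
spec:
  interval: 5m                     # how often Flux reconciles this release
  chart:
    spec:
      chart: example-service
      sourceRef:
        kind: HelmRepository
        name: example-charts
  values:
    image:
      tag: 4.2.31                  # new version, rolled out incrementally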

Note

All releases/deployments are promoted via the Common CI and CD Pipelines using Azure DevOps as the orchestrator. We promote continuous delivery with automated checks and tests in preference to manual intervention and approvals. Approval gates can optionally be added to Azure Pipelines to gate the promotion of code.

Deployment and App Configuration Guidance / Context

All services will have the following settings defaulted (changeable if required):

  • maxSurge - maximum additional Pods created at one time (50%).
  • maxUnavailable - maximum Pods unavailable at one time (25%).
  • podDisruptionBudget - allowed disruptions for a Pod (application) (25%, or at least 1).
  • min and max replicas - the number of replicas of the application in the Cluster. Minimum of 3 in production for high availability.
  • All deployments of business apps are on the User/Apps Node Pools. Platform/System apps are on the System Node Pool, with taints/tolerations applied to that effect.
  • Autoscaling via HPA is enabled.
  • All services will have their own dedicated AKS Namespace for their team.
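For orientation, the sketch below shows approximately how these defaults map onto Kubernetes objects. It is illustrative only; the names and exact values are assumptions, not the output of the Platform's actual HELM charts:

# Illustrative Kubernetes equivalents of the defaults above.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: example-service            # hypothetical service
spec:
  replicas: 3                      # minimum of 3 in production
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: "50%"              # extra Pods allowed during an upgrade
      maxUnavailable: "25%"        # Pods that may be unavailable during an upgrade
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: example-service
spec:
  maxUnavailable: "25%"            # allowed voluntary disruptions
  selector:
    matchLabels:
      app: example-service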

Constraints

  • Infrastructure is always deployed first if changed; database schema migrations are second, and App code is last (associated Config & Secrets are consumed at that point).
  • Database updates, if using PostgreSQL, will require development teams to deploy non-breaking changes and/or manage their schema updates appropriately with their app deployment to prevent downtime.
      • Shutter pages will be included in phase 2 / post-MVP if required.
  • Development teams must set health endpoints correctly for an effective rolling update (a hypothetical probe configuration is sketched after this list).
  • App Config, Infrastructure and Application Code are tied (versioned) together as an immutable unit.
      • They are versioned using the SemVer strategy defined in the versioning article.
  • App Secrets in the Key Vault/ADO library group are not versioned with the App/Code or Infra; they are fully independent and can be rotated periodically.
      • All secret rotations must have an overlap in expiry periods to ensure zero-downtime upgrades. Secrets should not be tied to versions, as being rotatable is good practice.
  • The Platform has defined minimum replicas/availability to meet Defra SLAs.
  • The Platform Reloader Service will drain and replace the Pods in the Cluster with a rolling upgrade on detection of new App Config or new App Secrets, automatically, via the sentinel key update.
  • All HELM Deployments are full CRUD operations - add, update, or delete. This includes Apps, Infra and Databases. Warning: you can delete your own infrastructure and configuration!
  • All App Configuration updates are full CRUD operations - create, update, or delete.
  • Secrets are add/update only for MVP. *Delete will be added post-MVP.
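As a minimal sketch of the health endpoint constraint above (the endpoint path and timings are example values, not Platform defaults):

# Illustrative readiness/liveness probes inside a Deployment's Pod spec.
containers:
  - name: example-service
    ports:
      - containerPort: 3000
    readinessProbe:                # gates traffic to a new Pod during a rollout
      httpGet:
        path: /healthz             # hypothetical health endpoint
        port: 3000
      initialDelaySeconds: 10
      periodSeconds: 10
    livenessProbe:                 # restarts a Pod that stops responding
      httpGet:
        path: /healthz
        port: 3000
      initialDelaySeconds: 30
      periodSeconds: 30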

Platform service versioning strategy.

This article outlines a two-phase versioning strategy for services on ADP, with the goal of supporting ephemeral environments by phase 2.

The following Git and Versioning strategies are in place and mandated:

  • Trunk-based development
  • Semantic Versioning (SemVer)

In Phase 1, before ephemeral environments, Feature branch builds fetch the version from the main branch's package.json file for Node and the .csproj file for C#. If the versions are the same, a validation error is thrown; if the feature branch version is higher, it is tagged with '-alpha' and the pipeline build ID. When the main branch version is pushed to the ACR on deployment after merging into main, it will take precedence over all feature (alpha) candidates of the same major/minor/patch version.

In Phase 2, with ephemeral environments, the process remains the same for Feature branches. For Pull Request (PR) builds, if the package.json/csproj is not updated, a validation error is thrown; if it is updated, the image/build is tagged with a release candidate (-RC) suffix and the build ID. The main branch version takes precedence over all Feature (alpha & RC) candidates. With ephemeral environments, each feature deployment will deploy a unique pod (application & infrastructure).

Phase 1 Strategy - versioning logic (before ephemeral environments)


Feature branch build and deployments

  1. Retrieve the version from the main branch package.json for the repository (e.g. 4.2.30):
      1. If the main and feature branches are the same version (M/M/P), then throw a validation error: "The increment is invalid. Users must increase the package.json version." Do not continue CI.
      2. If the main and feature branch versions are not the same (i.e., a developer has increased Major, Minor or Patch) and the feature branch version > main branch version, then tag the image and build with '-alpha' and the build ID, which becomes 4.2.31-alpha.511210, respecting the supplied major/minor/patch.
  2. Push this version to the Container Registry (ACR) when a deploy is requested.

Pull Request (PR) builds and deployments

No change for Phase 1, including tagging and naming. A developer's merge (feature branch) version must always be above main.

Main branch build and deployments

  1. The new version example is 4.2.31 (patch+1). Tag the release in GitHub.
  2. This version will be pushed to the ACR on deployment after merge into main.
  3. The main branch version is the primary version, which takes precedence above all feature (alpha) candidates of the same major/minor/patch.

Phase 2 versioning logic (with ephemeral environments in place)


Feature branch builds and deployments

  1. Retrieve the version from the main branch package.json/csproj for the repository (e.g. 4.2.30):
      1. If the main and feature branches are the same version (M/M/P), then throw a validation error: "The increment is invalid. Developers must increase the package.json version." Do not continue CI.
      2. If the main and feature branch versions are not the same (i.e., a developer has increased Major, Minor or Patch) and the feature branch version > main branch version, then tag the image and build with '-alpha' and the build ID, which becomes 4.2.31-alpha.511210, respecting the user's major/minor/patch.
      3. Push this version to the ACR when a deploy is requested.

Pull Request (PR) builds and deployments

  1. If the package.json/.csproj is not updated in the repository, throw the validation message "The increment is invalid '4.2.30' -> '4.2.30'. Please upgrade." and do not continue CI.
  2. If the package.json/.csproj is updated (e.g. to 4.2.31), tag the image and build with the release candidate (-rc) suffix and the build ID, which becomes 4.2.31-rc.511211 (a sketch follows below).
  3. Push this version to the Container Registry (ACR) when a deploy is requested.
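A companion sketch of the Phase 2 PR check, again assuming the npm semver package and illustrative names rather than the platform's actual pipeline code:

```typescript
import * as semver from "semver";

// Sketch of the Phase 2 PR check (assumed names only).
function pullRequestTag(
  mainVersion: string, // e.g. "4.2.30"
  prVersion: string,   // version in the PR's package.json/.csproj
  buildId: number      // e.g. 511211
): string {
  if (!semver.gt(prVersion, mainVersion)) {
    // package.json/.csproj not updated (or not increased): stop CI.
    throw new Error(
      `The increment is invalid '${mainVersion}' -> '${prVersion}'. Please upgrade.`
    );
  }
  // Tag the image/build as a release candidate.
  return `${prVersion}-rc.${buildId}`;
}

console.log(pullRequestTag("4.2.30", "4.2.31", 511211)); // "4.2.31-rc.511211"
```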

Main branch builds and deployments

  1. The new version example is 4.2.31 (patch+1). Tag the release in GitHub.
  2. This version is pushed to the Container Registry (ACR) on deployment after merge into main.
  3. The main branch version is the primary version, which takes precedence over all feature (alpha and RC) candidates of the same major/minor/patch (illustrated below).
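Main's precedence over RC candidates is the same SemVer property shown for Phase 1. Note that SemVer's ASCII ordering of pre-release identifiers also happens to rank rc above alpha for the same version, although the strategy itself only relies on main winning:

```typescript
import * as semver from "semver";

// The main release outranks an RC candidate of the same version...
console.log(semver.gt("4.2.31", "4.2.31-rc.511211")); // true

// ...and rc incidentally outranks alpha under SemVer's ASCII ordering.
console.log(semver.gt("4.2.31-rc.511211", "4.2.31-alpha.511210")); // true
```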

Guidance / Context

  • The build ID is unique: it is the ADO pipeline build ID, and it automatically increases on every CI run for every image you request to be deployed (feature deployment).
  • Developers must increment major, minor or patch at least once, on a feature branch build or PR build, to merge into main successfully. The build ID is automatically increased for subsequent deployments of the same version (see the example below).
  • The main version takes priority over alpha and RC candidates of the same major/minor/patch version.
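Because the build ID is the final dot-separated pre-release identifier, SemVer compares it numerically, so a later build of the same version ranks above an earlier one. For example (npm semver package assumed):

```typescript
import * as semver from "semver";

// Numeric pre-release identifiers compare as numbers: a later build ID
// of the same alpha version ranks higher than an earlier one.
console.log(semver.gt("4.2.31-alpha.511215", "4.2.31-alpha.511210")); // true
```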

Constraints

  • In Phase 1, feature deployments into Sandpit/Dev overwrite the existing deployment in terms of app code, infrastructure and databases. This can cause conflicts and constraints.
  • Once ephemeral environments are delivered, PR and feature deployments into Sandpit/Dev will each have their own dedicated infrastructure, including application, infra and databases.
  • SemVer and trunk-based development are mandated and designed into the platform.
  • All merges into 'main' are classed as releases and are tagged in GitHub as such, with the application version supplied.
  • Long-lived feature branches are not allowed. To deploy into an environment above Sandpit, you must merge into main.
+ + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/assets/images/favicon.png b/assets/images/favicon.png new file mode 100644 index 0000000000000000000000000000000000000000..1cf13b9f9d978896599290a74f77d5dbe7d1655c GIT binary patch literal 1870 zcmV-U2eJ5xP)Gc)JR9QMau)O=X#!i9;T z37kk-upj^(fsR36MHs_+1RCI)NNu9}lD0S{B^g8PN?Ww(5|~L#Ng*g{WsqleV}|#l zz8@ri&cTzw_h33bHI+12+kK6WN$h#n5cD8OQt`5kw6p~9H3()bUQ8OS4Q4HTQ=1Ol z_JAocz`fLbT2^{`8n~UAo=#AUOf=SOq4pYkt;XbC&f#7lb$*7=$na!mWCQ`dBQsO0 zLFBSPj*N?#u5&pf2t4XjEGH|=pPQ8xh7tpx;US5Cx_Ju;!O`ya-yF`)b%TEt5>eP1ZX~}sjjA%FJF?h7cX8=b!DZl<6%Cv z*G0uvvU+vmnpLZ2paivG-(cd*y3$hCIcsZcYOGh{$&)A6*XX&kXZd3G8m)G$Zz-LV z^GF3VAW^Mdv!)4OM8EgqRiz~*Cji;uzl2uC9^=8I84vNp;ltJ|q-*uQwGp2ma6cY7 z;`%`!9UXO@fr&Ebapfs34OmS9^u6$)bJxrucutf>`dKPKT%%*d3XlFVKunp9 zasduxjrjs>f8V=D|J=XNZp;_Zy^WgQ$9WDjgY=z@stwiEBm9u5*|34&1Na8BMjjgf3+SHcr`5~>oz1Y?SW^=K z^bTyO6>Gar#P_W2gEMwq)ot3; zREHn~U&Dp0l6YT0&k-wLwYjb?5zGK`W6S2v+K>AM(95m2C20L|3m~rN8dprPr@t)5lsk9Hu*W z?pS990s;Ez=+Rj{x7p``4>+c0G5^pYnB1^!TL=(?HLHZ+HicG{~4F1d^5Awl_2!1jICM-!9eoLhbbT^;yHcefyTAaqRcY zmuctDopPT!%k+}x%lZRKnzykr2}}XfG_ne?nRQO~?%hkzo;@RN{P6o`&mMUWBYMTe z6i8ChtjX&gXl`nvrU>jah)2iNM%JdjqoaeaU%yVn!^70x-flljp6Q5tK}5}&X8&&G zX3fpb3E(!rH=zVI_9Gjl45w@{(ITqngWFe7@9{mX;tO25Z_8 zQHEpI+FkTU#4xu>RkN>b3Tnc3UpWzPXWm#o55GKF09j^Mh~)K7{QqbO_~(@CVq! zS<8954|P8mXN2MRs86xZ&Q4EfM@JB94b=(YGuk)s&^jiSF=t3*oNK3`rD{H`yQ?d; ztE=laAUoZx5?RC8*WKOj`%LXEkgDd>&^Q4M^z`%u0rg-It=hLCVsq!Z%^6eB-OvOT zFZ28TN&cRmgU}Elrnk43)!>Z1FCPL2K$7}gwzIc48NX}#!A1BpJP?#v5wkNprhV** z?Cpalt1oH&{r!o3eSKc&ap)iz2BTn_VV`4>9M^b3;(YY}4>#ML6{~(4mH+?%07*qo IM6N<$f(jP3KmY&$ literal 0 HcmV?d00001 diff --git a/assets/javascripts/bundle.ad660dcc.min.js b/assets/javascripts/bundle.ad660dcc.min.js new file mode 100644 index 0000000..0ffc046 --- /dev/null +++ b/assets/javascripts/bundle.ad660dcc.min.js @@ -0,0 +1,29 @@ +"use strict";(()=>{var Fi=Object.create;var gr=Object.defineProperty;var ji=Object.getOwnPropertyDescriptor;var Wi=Object.getOwnPropertyNames,Dt=Object.getOwnPropertySymbols,Ui=Object.getPrototypeOf,xr=Object.prototype.hasOwnProperty,no=Object.prototype.propertyIsEnumerable;var oo=(e,t,r)=>t in e?gr(e,t,{enumerable:!0,configurable:!0,writable:!0,value:r}):e[t]=r,R=(e,t)=>{for(var r in t||(t={}))xr.call(t,r)&&oo(e,r,t[r]);if(Dt)for(var r of Dt(t))no.call(t,r)&&oo(e,r,t[r]);return e};var io=(e,t)=>{var r={};for(var o in e)xr.call(e,o)&&t.indexOf(o)<0&&(r[o]=e[o]);if(e!=null&&Dt)for(var o of Dt(e))t.indexOf(o)<0&&no.call(e,o)&&(r[o]=e[o]);return r};var yr=(e,t)=>()=>(t||e((t={exports:{}}).exports,t),t.exports);var Di=(e,t,r,o)=>{if(t&&typeof t=="object"||typeof t=="function")for(let n of Wi(t))!xr.call(e,n)&&n!==r&&gr(e,n,{get:()=>t[n],enumerable:!(o=ji(t,n))||o.enumerable});return e};var Vt=(e,t,r)=>(r=e!=null?Fi(Ui(e)):{},Di(t||!e||!e.__esModule?gr(r,"default",{value:e,enumerable:!0}):r,e));var ao=(e,t,r)=>new Promise((o,n)=>{var i=p=>{try{s(r.next(p))}catch(c){n(c)}},a=p=>{try{s(r.throw(p))}catch(c){n(c)}},s=p=>p.done?o(p.value):Promise.resolve(p.value).then(i,a);s((r=r.apply(e,t)).next())});var co=yr((Er,so)=>{(function(e,t){typeof Er=="object"&&typeof so!="undefined"?t():typeof define=="function"&&define.amd?define(t):t()})(Er,function(){"use strict";function e(r){var o=!0,n=!1,i=null,a={text:!0,search:!0,url:!0,tel:!0,email:!0,password:!0,number:!0,date:!0,month:!0,week:!0,time:!0,datetime:!0,"datetime-local":!0};function s(H){return!!(H&&H!==document&&H.nodeName!=="HTML"&&H.nodeName!=="BODY"&&"classList"in H&&"contains"in H.classList)}function p(H){var 
mt=H.type,ze=H.tagName;return!!(ze==="INPUT"&&a[mt]&&!H.readOnly||ze==="TEXTAREA"&&!H.readOnly||H.isContentEditable)}function c(H){H.classList.contains("focus-visible")||(H.classList.add("focus-visible"),H.setAttribute("data-focus-visible-added",""))}function l(H){H.hasAttribute("data-focus-visible-added")&&(H.classList.remove("focus-visible"),H.removeAttribute("data-focus-visible-added"))}function f(H){H.metaKey||H.altKey||H.ctrlKey||(s(r.activeElement)&&c(r.activeElement),o=!0)}function u(H){o=!1}function h(H){s(H.target)&&(o||p(H.target))&&c(H.target)}function w(H){s(H.target)&&(H.target.classList.contains("focus-visible")||H.target.hasAttribute("data-focus-visible-added"))&&(n=!0,window.clearTimeout(i),i=window.setTimeout(function(){n=!1},100),l(H.target))}function A(H){document.visibilityState==="hidden"&&(n&&(o=!0),te())}function te(){document.addEventListener("mousemove",J),document.addEventListener("mousedown",J),document.addEventListener("mouseup",J),document.addEventListener("pointermove",J),document.addEventListener("pointerdown",J),document.addEventListener("pointerup",J),document.addEventListener("touchmove",J),document.addEventListener("touchstart",J),document.addEventListener("touchend",J)}function ie(){document.removeEventListener("mousemove",J),document.removeEventListener("mousedown",J),document.removeEventListener("mouseup",J),document.removeEventListener("pointermove",J),document.removeEventListener("pointerdown",J),document.removeEventListener("pointerup",J),document.removeEventListener("touchmove",J),document.removeEventListener("touchstart",J),document.removeEventListener("touchend",J)}function J(H){H.target.nodeName&&H.target.nodeName.toLowerCase()==="html"||(o=!1,ie())}document.addEventListener("keydown",f,!0),document.addEventListener("mousedown",u,!0),document.addEventListener("pointerdown",u,!0),document.addEventListener("touchstart",u,!0),document.addEventListener("visibilitychange",A,!0),te(),r.addEventListener("focus",h,!0),r.addEventListener("blur",w,!0),r.nodeType===Node.DOCUMENT_FRAGMENT_NODE&&r.host?r.host.setAttribute("data-js-focus-visible",""):r.nodeType===Node.DOCUMENT_NODE&&(document.documentElement.classList.add("js-focus-visible"),document.documentElement.setAttribute("data-js-focus-visible",""))}if(typeof window!="undefined"&&typeof document!="undefined"){window.applyFocusVisiblePolyfill=e;var t;try{t=new CustomEvent("focus-visible-polyfill-ready")}catch(r){t=document.createEvent("CustomEvent"),t.initCustomEvent("focus-visible-polyfill-ready",!1,!1,{})}window.dispatchEvent(t)}typeof document!="undefined"&&e(document)})});var Yr=yr((Rt,Kr)=>{/*! 
+ * clipboard.js v2.0.11 + * https://clipboardjs.com/ + * + * Licensed MIT © Zeno Rocha + */(function(t,r){typeof Rt=="object"&&typeof Kr=="object"?Kr.exports=r():typeof define=="function"&&define.amd?define([],r):typeof Rt=="object"?Rt.ClipboardJS=r():t.ClipboardJS=r()})(Rt,function(){return function(){var e={686:function(o,n,i){"use strict";i.d(n,{default:function(){return Ii}});var a=i(279),s=i.n(a),p=i(370),c=i.n(p),l=i(817),f=i.n(l);function u(V){try{return document.execCommand(V)}catch(_){return!1}}var h=function(_){var O=f()(_);return u("cut"),O},w=h;function A(V){var _=document.documentElement.getAttribute("dir")==="rtl",O=document.createElement("textarea");O.style.fontSize="12pt",O.style.border="0",O.style.padding="0",O.style.margin="0",O.style.position="absolute",O.style[_?"right":"left"]="-9999px";var j=window.pageYOffset||document.documentElement.scrollTop;return O.style.top="".concat(j,"px"),O.setAttribute("readonly",""),O.value=V,O}var te=function(_,O){var j=A(_);O.container.appendChild(j);var D=f()(j);return u("copy"),j.remove(),D},ie=function(_){var O=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body},j="";return typeof _=="string"?j=te(_,O):_ instanceof HTMLInputElement&&!["text","search","url","tel","password"].includes(_==null?void 0:_.type)?j=te(_.value,O):(j=f()(_),u("copy")),j},J=ie;function H(V){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?H=function(O){return typeof O}:H=function(O){return O&&typeof Symbol=="function"&&O.constructor===Symbol&&O!==Symbol.prototype?"symbol":typeof O},H(V)}var mt=function(){var _=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{},O=_.action,j=O===void 0?"copy":O,D=_.container,Y=_.target,ke=_.text;if(j!=="copy"&&j!=="cut")throw new Error('Invalid "action" value, use either "copy" or "cut"');if(Y!==void 0)if(Y&&H(Y)==="object"&&Y.nodeType===1){if(j==="copy"&&Y.hasAttribute("disabled"))throw new Error('Invalid "target" attribute. Please use "readonly" instead of "disabled" attribute');if(j==="cut"&&(Y.hasAttribute("readonly")||Y.hasAttribute("disabled")))throw new Error(`Invalid "target" attribute. 
You can't cut text from elements with "readonly" or "disabled" attributes`)}else throw new Error('Invalid "target" value, use a valid Element');if(ke)return J(ke,{container:D});if(Y)return j==="cut"?w(Y):J(Y,{container:D})},ze=mt;function Ie(V){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?Ie=function(O){return typeof O}:Ie=function(O){return O&&typeof Symbol=="function"&&O.constructor===Symbol&&O!==Symbol.prototype?"symbol":typeof O},Ie(V)}function _i(V,_){if(!(V instanceof _))throw new TypeError("Cannot call a class as a function")}function ro(V,_){for(var O=0;O<_.length;O++){var j=_[O];j.enumerable=j.enumerable||!1,j.configurable=!0,"value"in j&&(j.writable=!0),Object.defineProperty(V,j.key,j)}}function Ai(V,_,O){return _&&ro(V.prototype,_),O&&ro(V,O),V}function Ci(V,_){if(typeof _!="function"&&_!==null)throw new TypeError("Super expression must either be null or a function");V.prototype=Object.create(_&&_.prototype,{constructor:{value:V,writable:!0,configurable:!0}}),_&&br(V,_)}function br(V,_){return br=Object.setPrototypeOf||function(j,D){return j.__proto__=D,j},br(V,_)}function Hi(V){var _=Pi();return function(){var j=Wt(V),D;if(_){var Y=Wt(this).constructor;D=Reflect.construct(j,arguments,Y)}else D=j.apply(this,arguments);return ki(this,D)}}function ki(V,_){return _&&(Ie(_)==="object"||typeof _=="function")?_:$i(V)}function $i(V){if(V===void 0)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return V}function Pi(){if(typeof Reflect=="undefined"||!Reflect.construct||Reflect.construct.sham)return!1;if(typeof Proxy=="function")return!0;try{return Date.prototype.toString.call(Reflect.construct(Date,[],function(){})),!0}catch(V){return!1}}function Wt(V){return Wt=Object.setPrototypeOf?Object.getPrototypeOf:function(O){return O.__proto__||Object.getPrototypeOf(O)},Wt(V)}function vr(V,_){var O="data-clipboard-".concat(V);if(_.hasAttribute(O))return _.getAttribute(O)}var Ri=function(V){Ci(O,V);var _=Hi(O);function O(j,D){var Y;return _i(this,O),Y=_.call(this),Y.resolveOptions(D),Y.listenClick(j),Y}return Ai(O,[{key:"resolveOptions",value:function(){var D=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{};this.action=typeof D.action=="function"?D.action:this.defaultAction,this.target=typeof D.target=="function"?D.target:this.defaultTarget,this.text=typeof D.text=="function"?D.text:this.defaultText,this.container=Ie(D.container)==="object"?D.container:document.body}},{key:"listenClick",value:function(D){var Y=this;this.listener=c()(D,"click",function(ke){return Y.onClick(ke)})}},{key:"onClick",value:function(D){var Y=D.delegateTarget||D.currentTarget,ke=this.action(Y)||"copy",Ut=ze({action:ke,container:this.container,target:this.target(Y),text:this.text(Y)});this.emit(Ut?"success":"error",{action:ke,text:Ut,trigger:Y,clearSelection:function(){Y&&Y.focus(),window.getSelection().removeAllRanges()}})}},{key:"defaultAction",value:function(D){return vr("action",D)}},{key:"defaultTarget",value:function(D){var Y=vr("target",D);if(Y)return document.querySelector(Y)}},{key:"defaultText",value:function(D){return vr("text",D)}},{key:"destroy",value:function(){this.listener.destroy()}}],[{key:"copy",value:function(D){var Y=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body};return J(D,Y)}},{key:"cut",value:function(D){return w(D)}},{key:"isSupported",value:function(){var D=arguments.length>0&&arguments[0]!==void 0?arguments[0]:["copy","cut"],Y=typeof 
D=="string"?[D]:D,ke=!!document.queryCommandSupported;return Y.forEach(function(Ut){ke=ke&&!!document.queryCommandSupported(Ut)}),ke}}]),O}(s()),Ii=Ri},828:function(o){var n=9;if(typeof Element!="undefined"&&!Element.prototype.matches){var i=Element.prototype;i.matches=i.matchesSelector||i.mozMatchesSelector||i.msMatchesSelector||i.oMatchesSelector||i.webkitMatchesSelector}function a(s,p){for(;s&&s.nodeType!==n;){if(typeof s.matches=="function"&&s.matches(p))return s;s=s.parentNode}}o.exports=a},438:function(o,n,i){var a=i(828);function s(l,f,u,h,w){var A=c.apply(this,arguments);return l.addEventListener(u,A,w),{destroy:function(){l.removeEventListener(u,A,w)}}}function p(l,f,u,h,w){return typeof l.addEventListener=="function"?s.apply(null,arguments):typeof u=="function"?s.bind(null,document).apply(null,arguments):(typeof l=="string"&&(l=document.querySelectorAll(l)),Array.prototype.map.call(l,function(A){return s(A,f,u,h,w)}))}function c(l,f,u,h){return function(w){w.delegateTarget=a(w.target,f),w.delegateTarget&&h.call(l,w)}}o.exports=p},879:function(o,n){n.node=function(i){return i!==void 0&&i instanceof HTMLElement&&i.nodeType===1},n.nodeList=function(i){var a=Object.prototype.toString.call(i);return i!==void 0&&(a==="[object NodeList]"||a==="[object HTMLCollection]")&&"length"in i&&(i.length===0||n.node(i[0]))},n.string=function(i){return typeof i=="string"||i instanceof String},n.fn=function(i){var a=Object.prototype.toString.call(i);return a==="[object Function]"}},370:function(o,n,i){var a=i(879),s=i(438);function p(u,h,w){if(!u&&!h&&!w)throw new Error("Missing required arguments");if(!a.string(h))throw new TypeError("Second argument must be a String");if(!a.fn(w))throw new TypeError("Third argument must be a Function");if(a.node(u))return c(u,h,w);if(a.nodeList(u))return l(u,h,w);if(a.string(u))return f(u,h,w);throw new TypeError("First argument must be a String, HTMLElement, HTMLCollection, or NodeList")}function c(u,h,w){return u.addEventListener(h,w),{destroy:function(){u.removeEventListener(h,w)}}}function l(u,h,w){return Array.prototype.forEach.call(u,function(A){A.addEventListener(h,w)}),{destroy:function(){Array.prototype.forEach.call(u,function(A){A.removeEventListener(h,w)})}}}function f(u,h,w){return s(document.body,u,h,w)}o.exports=p},817:function(o){function n(i){var a;if(i.nodeName==="SELECT")i.focus(),a=i.value;else if(i.nodeName==="INPUT"||i.nodeName==="TEXTAREA"){var s=i.hasAttribute("readonly");s||i.setAttribute("readonly",""),i.select(),i.setSelectionRange(0,i.value.length),s||i.removeAttribute("readonly"),a=i.value}else{i.hasAttribute("contenteditable")&&i.focus();var p=window.getSelection(),c=document.createRange();c.selectNodeContents(i),p.removeAllRanges(),p.addRange(c),a=p.toString()}return a}o.exports=n},279:function(o){function n(){}n.prototype={on:function(i,a,s){var p=this.e||(this.e={});return(p[i]||(p[i]=[])).push({fn:a,ctx:s}),this},once:function(i,a,s){var p=this;function c(){p.off(i,c),a.apply(s,arguments)}return c._=a,this.on(i,c,s)},emit:function(i){var a=[].slice.call(arguments,1),s=((this.e||(this.e={}))[i]||[]).slice(),p=0,c=s.length;for(p;p{"use strict";/*! 
+ * escape-html + * Copyright(c) 2012-2013 TJ Holowaychuk + * Copyright(c) 2015 Andreas Lubbe + * Copyright(c) 2015 Tiancheng "Timothy" Gu + * MIT Licensed + */var ts=/["'&<>]/;ei.exports=rs;function rs(e){var t=""+e,r=ts.exec(t);if(!r)return t;var o,n="",i=0,a=0;for(i=r.index;i0&&i[i.length-1])&&(c[0]===6||c[0]===2)){r=0;continue}if(c[0]===3&&(!i||c[1]>i[0]&&c[1]=e.length&&(e=void 0),{value:e&&e[o++],done:!e}}};throw new TypeError(t?"Object is not iterable.":"Symbol.iterator is not defined.")}function N(e,t){var r=typeof Symbol=="function"&&e[Symbol.iterator];if(!r)return e;var o=r.call(e),n,i=[],a;try{for(;(t===void 0||t-- >0)&&!(n=o.next()).done;)i.push(n.value)}catch(s){a={error:s}}finally{try{n&&!n.done&&(r=o.return)&&r.call(o)}finally{if(a)throw a.error}}return i}function q(e,t,r){if(r||arguments.length===2)for(var o=0,n=t.length,i;o1||s(u,h)})})}function s(u,h){try{p(o[u](h))}catch(w){f(i[0][3],w)}}function p(u){u.value instanceof nt?Promise.resolve(u.value.v).then(c,l):f(i[0][2],u)}function c(u){s("next",u)}function l(u){s("throw",u)}function f(u,h){u(h),i.shift(),i.length&&s(i[0][0],i[0][1])}}function mo(e){if(!Symbol.asyncIterator)throw new TypeError("Symbol.asyncIterator is not defined.");var t=e[Symbol.asyncIterator],r;return t?t.call(e):(e=typeof de=="function"?de(e):e[Symbol.iterator](),r={},o("next"),o("throw"),o("return"),r[Symbol.asyncIterator]=function(){return this},r);function o(i){r[i]=e[i]&&function(a){return new Promise(function(s,p){a=e[i](a),n(s,p,a.done,a.value)})}}function n(i,a,s,p){Promise.resolve(p).then(function(c){i({value:c,done:s})},a)}}function k(e){return typeof e=="function"}function ft(e){var t=function(o){Error.call(o),o.stack=new Error().stack},r=e(t);return r.prototype=Object.create(Error.prototype),r.prototype.constructor=r,r}var zt=ft(function(e){return function(r){e(this),this.message=r?r.length+` errors occurred during unsubscription: +`+r.map(function(o,n){return n+1+") "+o.toString()}).join(` + `):"",this.name="UnsubscriptionError",this.errors=r}});function qe(e,t){if(e){var r=e.indexOf(t);0<=r&&e.splice(r,1)}}var Fe=function(){function e(t){this.initialTeardown=t,this.closed=!1,this._parentage=null,this._finalizers=null}return e.prototype.unsubscribe=function(){var t,r,o,n,i;if(!this.closed){this.closed=!0;var a=this._parentage;if(a)if(this._parentage=null,Array.isArray(a))try{for(var s=de(a),p=s.next();!p.done;p=s.next()){var c=p.value;c.remove(this)}}catch(A){t={error:A}}finally{try{p&&!p.done&&(r=s.return)&&r.call(s)}finally{if(t)throw t.error}}else a.remove(this);var l=this.initialTeardown;if(k(l))try{l()}catch(A){i=A instanceof zt?A.errors:[A]}var f=this._finalizers;if(f){this._finalizers=null;try{for(var u=de(f),h=u.next();!h.done;h=u.next()){var w=h.value;try{fo(w)}catch(A){i=i!=null?i:[],A instanceof zt?i=q(q([],N(i)),N(A.errors)):i.push(A)}}}catch(A){o={error:A}}finally{try{h&&!h.done&&(n=u.return)&&n.call(u)}finally{if(o)throw o.error}}}if(i)throw new zt(i)}},e.prototype.add=function(t){var r;if(t&&t!==this)if(this.closed)fo(t);else{if(t instanceof e){if(t.closed||t._hasParent(this))return;t._addParent(this)}(this._finalizers=(r=this._finalizers)!==null&&r!==void 0?r:[]).push(t)}},e.prototype._hasParent=function(t){var r=this._parentage;return r===t||Array.isArray(r)&&r.includes(t)},e.prototype._addParent=function(t){var r=this._parentage;this._parentage=Array.isArray(r)?(r.push(t),r):r?[r,t]:t},e.prototype._removeParent=function(t){var 
r=this._parentage;r===t?this._parentage=null:Array.isArray(r)&&qe(r,t)},e.prototype.remove=function(t){var r=this._finalizers;r&&qe(r,t),t instanceof e&&t._removeParent(this)},e.EMPTY=function(){var t=new e;return t.closed=!0,t}(),e}();var Tr=Fe.EMPTY;function qt(e){return e instanceof Fe||e&&"closed"in e&&k(e.remove)&&k(e.add)&&k(e.unsubscribe)}function fo(e){k(e)?e():e.unsubscribe()}var $e={onUnhandledError:null,onStoppedNotification:null,Promise:void 0,useDeprecatedSynchronousErrorHandling:!1,useDeprecatedNextContext:!1};var ut={setTimeout:function(e,t){for(var r=[],o=2;o0},enumerable:!1,configurable:!0}),t.prototype._trySubscribe=function(r){return this._throwIfClosed(),e.prototype._trySubscribe.call(this,r)},t.prototype._subscribe=function(r){return this._throwIfClosed(),this._checkFinalizedStatuses(r),this._innerSubscribe(r)},t.prototype._innerSubscribe=function(r){var o=this,n=this,i=n.hasError,a=n.isStopped,s=n.observers;return i||a?Tr:(this.currentObservers=null,s.push(r),new Fe(function(){o.currentObservers=null,qe(s,r)}))},t.prototype._checkFinalizedStatuses=function(r){var o=this,n=o.hasError,i=o.thrownError,a=o.isStopped;n?r.error(i):a&&r.complete()},t.prototype.asObservable=function(){var r=new F;return r.source=this,r},t.create=function(r,o){return new Eo(r,o)},t}(F);var Eo=function(e){re(t,e);function t(r,o){var n=e.call(this)||this;return n.destination=r,n.source=o,n}return t.prototype.next=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.next)===null||n===void 0||n.call(o,r)},t.prototype.error=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.error)===null||n===void 0||n.call(o,r)},t.prototype.complete=function(){var r,o;(o=(r=this.destination)===null||r===void 0?void 0:r.complete)===null||o===void 0||o.call(r)},t.prototype._subscribe=function(r){var o,n;return(n=(o=this.source)===null||o===void 0?void 0:o.subscribe(r))!==null&&n!==void 0?n:Tr},t}(g);var _r=function(e){re(t,e);function t(r){var o=e.call(this)||this;return o._value=r,o}return Object.defineProperty(t.prototype,"value",{get:function(){return this.getValue()},enumerable:!1,configurable:!0}),t.prototype._subscribe=function(r){var o=e.prototype._subscribe.call(this,r);return!o.closed&&r.next(this._value),o},t.prototype.getValue=function(){var r=this,o=r.hasError,n=r.thrownError,i=r._value;if(o)throw n;return this._throwIfClosed(),i},t.prototype.next=function(r){e.prototype.next.call(this,this._value=r)},t}(g);var Lt={now:function(){return(Lt.delegate||Date).now()},delegate:void 0};var _t=function(e){re(t,e);function t(r,o,n){r===void 0&&(r=1/0),o===void 0&&(o=1/0),n===void 0&&(n=Lt);var i=e.call(this)||this;return i._bufferSize=r,i._windowTime=o,i._timestampProvider=n,i._buffer=[],i._infiniteTimeWindow=!0,i._infiniteTimeWindow=o===1/0,i._bufferSize=Math.max(1,r),i._windowTime=Math.max(1,o),i}return t.prototype.next=function(r){var o=this,n=o.isStopped,i=o._buffer,a=o._infiniteTimeWindow,s=o._timestampProvider,p=o._windowTime;n||(i.push(r),!a&&i.push(s.now()+p)),this._trimBuffer(),e.prototype.next.call(this,r)},t.prototype._subscribe=function(r){this._throwIfClosed(),this._trimBuffer();for(var o=this._innerSubscribe(r),n=this,i=n._infiniteTimeWindow,a=n._buffer,s=a.slice(),p=0;p0?e.prototype.schedule.call(this,r,o):(this.delay=o,this.state=r,this.scheduler.flush(this),this)},t.prototype.execute=function(r,o){return o>0||this.closed?e.prototype.execute.call(this,r,o):this._execute(r,o)},t.prototype.requestAsyncId=function(r,o,n){return n===void 
0&&(n=0),n!=null&&n>0||n==null&&this.delay>0?e.prototype.requestAsyncId.call(this,r,o,n):(r.flush(this),0)},t}(vt);var So=function(e){re(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t}(gt);var Hr=new So(To);var Oo=function(e){re(t,e);function t(r,o){var n=e.call(this,r,o)||this;return n.scheduler=r,n.work=o,n}return t.prototype.requestAsyncId=function(r,o,n){return n===void 0&&(n=0),n!==null&&n>0?e.prototype.requestAsyncId.call(this,r,o,n):(r.actions.push(this),r._scheduled||(r._scheduled=bt.requestAnimationFrame(function(){return r.flush(void 0)})))},t.prototype.recycleAsyncId=function(r,o,n){var i;if(n===void 0&&(n=0),n!=null?n>0:this.delay>0)return e.prototype.recycleAsyncId.call(this,r,o,n);var a=r.actions;o!=null&&((i=a[a.length-1])===null||i===void 0?void 0:i.id)!==o&&(bt.cancelAnimationFrame(o),r._scheduled=void 0)},t}(vt);var Mo=function(e){re(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t.prototype.flush=function(r){this._active=!0;var o=this._scheduled;this._scheduled=void 0;var n=this.actions,i;r=r||n.shift();do if(i=r.execute(r.state,r.delay))break;while((r=n[0])&&r.id===o&&n.shift());if(this._active=!1,i){for(;(r=n[0])&&r.id===o&&n.shift();)r.unsubscribe();throw i}},t}(gt);var me=new Mo(Oo);var M=new F(function(e){return e.complete()});function Yt(e){return e&&k(e.schedule)}function kr(e){return e[e.length-1]}function Xe(e){return k(kr(e))?e.pop():void 0}function He(e){return Yt(kr(e))?e.pop():void 0}function Bt(e,t){return typeof kr(e)=="number"?e.pop():t}var xt=function(e){return e&&typeof e.length=="number"&&typeof e!="function"};function Gt(e){return k(e==null?void 0:e.then)}function Jt(e){return k(e[ht])}function Xt(e){return Symbol.asyncIterator&&k(e==null?void 0:e[Symbol.asyncIterator])}function Zt(e){return new TypeError("You provided "+(e!==null&&typeof e=="object"?"an invalid object":"'"+e+"'")+" where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.")}function Gi(){return typeof Symbol!="function"||!Symbol.iterator?"@@iterator":Symbol.iterator}var er=Gi();function tr(e){return k(e==null?void 0:e[er])}function rr(e){return lo(this,arguments,function(){var r,o,n,i;return Nt(this,function(a){switch(a.label){case 0:r=e.getReader(),a.label=1;case 1:a.trys.push([1,,9,10]),a.label=2;case 2:return[4,nt(r.read())];case 3:return o=a.sent(),n=o.value,i=o.done,i?[4,nt(void 0)]:[3,5];case 4:return[2,a.sent()];case 5:return[4,nt(n)];case 6:return[4,a.sent()];case 7:return a.sent(),[3,2];case 8:return[3,10];case 9:return r.releaseLock(),[7];case 10:return[2]}})})}function or(e){return k(e==null?void 0:e.getReader)}function W(e){if(e instanceof F)return e;if(e!=null){if(Jt(e))return Ji(e);if(xt(e))return Xi(e);if(Gt(e))return Zi(e);if(Xt(e))return Lo(e);if(tr(e))return ea(e);if(or(e))return ta(e)}throw Zt(e)}function Ji(e){return new F(function(t){var r=e[ht]();if(k(r.subscribe))return r.subscribe(t);throw new TypeError("Provided object does not correctly implement Symbol.observable")})}function Xi(e){return new F(function(t){for(var r=0;r=2;return function(o){return o.pipe(e?b(function(n,i){return e(n,i,o)}):le,Te(1),r?Be(t):zo(function(){return new ir}))}}function Fr(e){return e<=0?function(){return M}:y(function(t,r){var o=[];t.subscribe(T(r,function(n){o.push(n),e=2,!0))}function pe(e){e===void 0&&(e={});var t=e.connector,r=t===void 0?function(){return new g}:t,o=e.resetOnError,n=o===void 0?!0:o,i=e.resetOnComplete,a=i===void 0?!0:i,s=e.resetOnRefCountZero,p=s===void 0?!0:s;return function(c){var l,f,u,h=0,w=!1,A=!1,te=function(){f==null||f.unsubscribe(),f=void 0},ie=function(){te(),l=u=void 0,w=A=!1},J=function(){var H=l;ie(),H==null||H.unsubscribe()};return y(function(H,mt){h++,!A&&!w&&te();var ze=u=u!=null?u:r();mt.add(function(){h--,h===0&&!A&&!w&&(f=Wr(J,p))}),ze.subscribe(mt),!l&&h>0&&(l=new at({next:function(Ie){return ze.next(Ie)},error:function(Ie){A=!0,te(),f=Wr(ie,n,Ie),ze.error(Ie)},complete:function(){w=!0,te(),f=Wr(ie,a),ze.complete()}}),W(H).subscribe(l))})(c)}}function Wr(e,t){for(var r=[],o=2;oe.next(document)),e}function $(e,t=document){return Array.from(t.querySelectorAll(e))}function P(e,t=document){let r=fe(e,t);if(typeof r=="undefined")throw new ReferenceError(`Missing element: expected "${e}" to be present`);return r}function fe(e,t=document){return t.querySelector(e)||void 0}function Re(){var e,t,r,o;return(o=(r=(t=(e=document.activeElement)==null?void 0:e.shadowRoot)==null?void 0:t.activeElement)!=null?r:document.activeElement)!=null?o:void 0}var xa=S(d(document.body,"focusin"),d(document.body,"focusout")).pipe(_e(1),Q(void 0),m(()=>Re()||document.body),B(1));function et(e){return xa.pipe(m(t=>e.contains(t)),K())}function kt(e,t){return C(()=>S(d(e,"mouseenter").pipe(m(()=>!0)),d(e,"mouseleave").pipe(m(()=>!1))).pipe(t?Ht(r=>Me(+!r*t)):le,Q(e.matches(":hover"))))}function Bo(e,t){if(typeof t=="string"||typeof t=="number")e.innerHTML+=t.toString();else if(t instanceof Node)e.appendChild(t);else if(Array.isArray(t))for(let r of t)Bo(e,r)}function x(e,t,...r){let o=document.createElement(e);if(t)for(let n of Object.keys(t))typeof t[n]!="undefined"&&(typeof t[n]!="boolean"?o.setAttribute(n,t[n]):o.setAttribute(n,""));for(let n of r)Bo(o,n);return o}function sr(e){if(e>999){let t=+((e-950)%1e3>99);return`${((e+1e-6)/1e3).toFixed(t)}k`}else return e.toString()}function wt(e){let t=x("script",{src:e});return 
C(()=>(document.head.appendChild(t),S(d(t,"load"),d(t,"error").pipe(v(()=>$r(()=>new ReferenceError(`Invalid script: ${e}`))))).pipe(m(()=>{}),L(()=>document.head.removeChild(t)),Te(1))))}var Go=new g,ya=C(()=>typeof ResizeObserver=="undefined"?wt("https://unpkg.com/resize-observer-polyfill"):I(void 0)).pipe(m(()=>new ResizeObserver(e=>e.forEach(t=>Go.next(t)))),v(e=>S(Ke,I(e)).pipe(L(()=>e.disconnect()))),B(1));function ce(e){return{width:e.offsetWidth,height:e.offsetHeight}}function ge(e){let t=e;for(;t.clientWidth===0&&t.parentElement;)t=t.parentElement;return ya.pipe(E(r=>r.observe(t)),v(r=>Go.pipe(b(o=>o.target===t),L(()=>r.unobserve(t)))),m(()=>ce(e)),Q(ce(e)))}function Tt(e){return{width:e.scrollWidth,height:e.scrollHeight}}function cr(e){let t=e.parentElement;for(;t&&(e.scrollWidth<=t.scrollWidth&&e.scrollHeight<=t.scrollHeight);)t=(e=t).parentElement;return t?e:void 0}function Jo(e){let t=[],r=e.parentElement;for(;r;)(e.clientWidth>r.clientWidth||e.clientHeight>r.clientHeight)&&t.push(r),r=(e=r).parentElement;return t.length===0&&t.push(document.documentElement),t}function Ue(e){return{x:e.offsetLeft,y:e.offsetTop}}function Xo(e){let t=e.getBoundingClientRect();return{x:t.x+window.scrollX,y:t.y+window.scrollY}}function Zo(e){return S(d(window,"load"),d(window,"resize")).pipe(Le(0,me),m(()=>Ue(e)),Q(Ue(e)))}function pr(e){return{x:e.scrollLeft,y:e.scrollTop}}function De(e){return S(d(e,"scroll"),d(window,"scroll"),d(window,"resize")).pipe(Le(0,me),m(()=>pr(e)),Q(pr(e)))}var en=new g,Ea=C(()=>I(new IntersectionObserver(e=>{for(let t of e)en.next(t)},{threshold:0}))).pipe(v(e=>S(Ke,I(e)).pipe(L(()=>e.disconnect()))),B(1));function tt(e){return Ea.pipe(E(t=>t.observe(e)),v(t=>en.pipe(b(({target:r})=>r===e),L(()=>t.unobserve(e)),m(({isIntersecting:r})=>r))))}function tn(e,t=16){return De(e).pipe(m(({y:r})=>{let o=ce(e),n=Tt(e);return r>=n.height-o.height-t}),K())}var lr={drawer:P("[data-md-toggle=drawer]"),search:P("[data-md-toggle=search]")};function rn(e){return lr[e].checked}function Je(e,t){lr[e].checked!==t&&lr[e].click()}function Ve(e){let t=lr[e];return d(t,"change").pipe(m(()=>t.checked),Q(t.checked))}function wa(e,t){switch(e.constructor){case HTMLInputElement:return e.type==="radio"?/^Arrow/.test(t):!0;case HTMLSelectElement:case HTMLTextAreaElement:return!0;default:return e.isContentEditable}}function Ta(){return S(d(window,"compositionstart").pipe(m(()=>!0)),d(window,"compositionend").pipe(m(()=>!1))).pipe(Q(!1))}function on(){let e=d(window,"keydown").pipe(b(t=>!(t.metaKey||t.ctrlKey)),m(t=>({mode:rn("search")?"search":"global",type:t.key,claim(){t.preventDefault(),t.stopPropagation()}})),b(({mode:t,type:r})=>{if(t==="global"){let o=Re();if(typeof o!="undefined")return!wa(o,r)}return!0}),pe());return Ta().pipe(v(t=>t?M:e))}function xe(){return new URL(location.href)}function pt(e,t=!1){if(G("navigation.instant")&&!t){let r=x("a",{href:e.href});document.body.appendChild(r),r.click(),r.remove()}else location.href=e.href}function nn(){return new g}function an(){return location.hash.slice(1)}function sn(e){let t=x("a",{href:e});t.addEventListener("click",r=>r.stopPropagation()),t.click()}function Sa(e){return S(d(window,"hashchange"),e).pipe(m(an),Q(an()),b(t=>t.length>0),B(1))}function cn(e){return Sa(e).pipe(m(t=>fe(`[id="${t}"]`)),b(t=>typeof t!="undefined"))}function $t(e){let t=matchMedia(e);return ar(r=>t.addListener(()=>r(t.matches))).pipe(Q(t.matches))}function pn(){let e=matchMedia("print");return 
S(d(window,"beforeprint").pipe(m(()=>!0)),d(window,"afterprint").pipe(m(()=>!1))).pipe(Q(e.matches))}function Nr(e,t){return e.pipe(v(r=>r?t():M))}function zr(e,t){return new F(r=>{let o=new XMLHttpRequest;return o.open("GET",`${e}`),o.responseType="blob",o.addEventListener("load",()=>{o.status>=200&&o.status<300?(r.next(o.response),r.complete()):r.error(new Error(o.statusText))}),o.addEventListener("error",()=>{r.error(new Error("Network error"))}),o.addEventListener("abort",()=>{r.complete()}),typeof(t==null?void 0:t.progress$)!="undefined"&&(o.addEventListener("progress",n=>{var i;if(n.lengthComputable)t.progress$.next(n.loaded/n.total*100);else{let a=(i=o.getResponseHeader("Content-Length"))!=null?i:0;t.progress$.next(n.loaded/+a*100)}}),t.progress$.next(5)),o.send(),()=>o.abort()})}function Ne(e,t){return zr(e,t).pipe(v(r=>r.text()),m(r=>JSON.parse(r)),B(1))}function ln(e,t){let r=new DOMParser;return zr(e,t).pipe(v(o=>o.text()),m(o=>r.parseFromString(o,"text/html")),B(1))}function mn(e,t){let r=new DOMParser;return zr(e,t).pipe(v(o=>o.text()),m(o=>r.parseFromString(o,"text/xml")),B(1))}function fn(){return{x:Math.max(0,scrollX),y:Math.max(0,scrollY)}}function un(){return S(d(window,"scroll",{passive:!0}),d(window,"resize",{passive:!0})).pipe(m(fn),Q(fn()))}function dn(){return{width:innerWidth,height:innerHeight}}function hn(){return d(window,"resize",{passive:!0}).pipe(m(dn),Q(dn()))}function bn(){return z([un(),hn()]).pipe(m(([e,t])=>({offset:e,size:t})),B(1))}function mr(e,{viewport$:t,header$:r}){let o=t.pipe(Z("size")),n=z([o,r]).pipe(m(()=>Ue(e)));return z([r,t,n]).pipe(m(([{height:i},{offset:a,size:s},{x:p,y:c}])=>({offset:{x:a.x-p,y:a.y-c+i},size:s})))}function Oa(e){return d(e,"message",t=>t.data)}function Ma(e){let t=new g;return t.subscribe(r=>e.postMessage(r)),t}function vn(e,t=new Worker(e)){let r=Oa(t),o=Ma(t),n=new g;n.subscribe(o);let i=o.pipe(X(),ne(!0));return n.pipe(X(),Pe(r.pipe(U(i))),pe())}var La=P("#__config"),St=JSON.parse(La.textContent);St.base=`${new URL(St.base,xe())}`;function ye(){return St}function G(e){return St.features.includes(e)}function Ee(e,t){return typeof t!="undefined"?St.translations[e].replace("#",t.toString()):St.translations[e]}function Se(e,t=document){return P(`[data-md-component=${e}]`,t)}function ae(e,t=document){return $(`[data-md-component=${e}]`,t)}function _a(e){let t=P(".md-typeset > :first-child",e);return d(t,"click",{once:!0}).pipe(m(()=>P(".md-typeset",e)),m(r=>({hash:__md_hash(r.innerHTML)})))}function gn(e){if(!G("announce.dismiss")||!e.childElementCount)return M;if(!e.hidden){let t=P(".md-typeset",e);__md_hash(t.innerHTML)===__md_get("__announce")&&(e.hidden=!0)}return C(()=>{let t=new g;return t.subscribe(({hash:r})=>{e.hidden=!0,__md_set("__announce",r)}),_a(e).pipe(E(r=>t.next(r)),L(()=>t.complete()),m(r=>R({ref:e},r)))})}function Aa(e,{target$:t}){return t.pipe(m(r=>({hidden:r!==e})))}function xn(e,t){let r=new g;return r.subscribe(({hidden:o})=>{e.hidden=o}),Aa(e,t).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))}function Pt(e,t){return t==="inline"?x("div",{class:"md-tooltip md-tooltip--inline",id:e,role:"tooltip"},x("div",{class:"md-tooltip__inner md-typeset"})):x("div",{class:"md-tooltip",id:e,role:"tooltip"},x("div",{class:"md-tooltip__inner md-typeset"}))}function yn(...e){return x("div",{class:"md-tooltip2",role:"tooltip"},x("div",{class:"md-tooltip2__inner md-typeset"},e))}function En(e,t){if(t=t?`${t}_annotation_${e}`:void 0,t){let r=t?`#${t}`:void 0;return 
x("aside",{class:"md-annotation",tabIndex:0},Pt(t),x("a",{href:r,class:"md-annotation__index",tabIndex:-1},x("span",{"data-md-annotation-id":e})))}else return x("aside",{class:"md-annotation",tabIndex:0},Pt(t),x("span",{class:"md-annotation__index",tabIndex:-1},x("span",{"data-md-annotation-id":e})))}function wn(e){return x("button",{class:"md-clipboard md-icon",title:Ee("clipboard.copy"),"data-clipboard-target":`#${e} > code`})}function qr(e,t){let r=t&2,o=t&1,n=Object.keys(e.terms).filter(p=>!e.terms[p]).reduce((p,c)=>[...p,x("del",null,c)," "],[]).slice(0,-1),i=ye(),a=new URL(e.location,i.base);G("search.highlight")&&a.searchParams.set("h",Object.entries(e.terms).filter(([,p])=>p).reduce((p,[c])=>`${p} ${c}`.trim(),""));let{tags:s}=ye();return x("a",{href:`${a}`,class:"md-search-result__link",tabIndex:-1},x("article",{class:"md-search-result__article md-typeset","data-md-score":e.score.toFixed(2)},r>0&&x("div",{class:"md-search-result__icon md-icon"}),r>0&&x("h1",null,e.title),r<=0&&x("h2",null,e.title),o>0&&e.text.length>0&&e.text,e.tags&&e.tags.map(p=>{let c=s?p in s?`md-tag-icon md-tag--${s[p]}`:"md-tag-icon":"";return x("span",{class:`md-tag ${c}`},p)}),o>0&&n.length>0&&x("p",{class:"md-search-result__terms"},Ee("search.result.term.missing"),": ",...n)))}function Tn(e){let t=e[0].score,r=[...e],o=ye(),n=r.findIndex(l=>!`${new URL(l.location,o.base)}`.includes("#")),[i]=r.splice(n,1),a=r.findIndex(l=>l.scoreqr(l,1)),...p.length?[x("details",{class:"md-search-result__more"},x("summary",{tabIndex:-1},x("div",null,p.length>0&&p.length===1?Ee("search.result.more.one"):Ee("search.result.more.other",p.length))),...p.map(l=>qr(l,1)))]:[]];return x("li",{class:"md-search-result__item"},c)}function Sn(e){return x("ul",{class:"md-source__facts"},Object.entries(e).map(([t,r])=>x("li",{class:`md-source__fact md-source__fact--${t}`},typeof r=="number"?sr(r):r)))}function Qr(e){let t=`tabbed-control tabbed-control--${e}`;return x("div",{class:t,hidden:!0},x("button",{class:"tabbed-button",tabIndex:-1,"aria-hidden":"true"}))}function On(e){return x("div",{class:"md-typeset__scrollwrap"},x("div",{class:"md-typeset__table"},e))}function Ca(e){var o;let t=ye(),r=new URL(`../${e.version}/`,t.base);return x("li",{class:"md-version__item"},x("a",{href:`${r}`,class:"md-version__link"},e.title,((o=t.version)==null?void 0:o.alias)&&e.aliases.length>0&&x("span",{class:"md-version__alias"},e.aliases[0])))}function Mn(e,t){var o;let r=ye();return e=e.filter(n=>{var i;return!((i=n.properties)!=null&&i.hidden)}),x("div",{class:"md-version"},x("button",{class:"md-version__current","aria-label":Ee("select.version")},t.title,((o=r.version)==null?void 0:o.alias)&&t.aliases.length>0&&x("span",{class:"md-version__alias"},t.aliases[0])),x("ul",{class:"md-version__list"},e.map(Ca)))}var Ha=0;function ka(e){let t=z([et(e),kt(e)]).pipe(m(([o,n])=>o||n),K()),r=C(()=>Jo(e)).pipe(oe(De),ct(1),m(()=>Xo(e)));return t.pipe(Ae(o=>o),v(()=>z([t,r])),m(([o,n])=>({active:o,offset:n})),pe())}function $a(e,t){let{content$:r,viewport$:o}=t,n=`__tooltip2_${Ha++}`;return C(()=>{let i=new g,a=new _r(!1);i.pipe(X(),ne(!1)).subscribe(a);let s=a.pipe(Ht(c=>Me(+!c*250,Hr)),K(),v(c=>c?r:M),E(c=>c.id=n),pe());z([i.pipe(m(({active:c})=>c)),s.pipe(v(c=>kt(c,250)),Q(!1))]).pipe(m(c=>c.some(l=>l))).subscribe(a);let p=a.pipe(b(c=>c),ee(s,o),m(([c,l,{size:f}])=>{let u=e.getBoundingClientRect(),h=u.width/2;if(l.role==="tooltip")return{x:h,y:8+u.height};if(u.y>=f.height/2){let{height:w}=ce(l);return{x:h,y:-16-w}}else 
return{x:h,y:16+u.height}}));return z([s,i,p]).subscribe(([c,{offset:l},f])=>{c.style.setProperty("--md-tooltip-host-x",`${l.x}px`),c.style.setProperty("--md-tooltip-host-y",`${l.y}px`),c.style.setProperty("--md-tooltip-x",`${f.x}px`),c.style.setProperty("--md-tooltip-y",`${f.y}px`),c.classList.toggle("md-tooltip2--top",f.y<0),c.classList.toggle("md-tooltip2--bottom",f.y>=0)}),a.pipe(b(c=>c),ee(s,(c,l)=>l),b(c=>c.role==="tooltip")).subscribe(c=>{let l=ce(P(":scope > *",c));c.style.setProperty("--md-tooltip-width",`${l.width}px`),c.style.setProperty("--md-tooltip-tail","0px")}),a.pipe(K(),be(me),ee(s)).subscribe(([c,l])=>{l.classList.toggle("md-tooltip2--active",c)}),z([a.pipe(b(c=>c)),s]).subscribe(([c,l])=>{l.role==="dialog"?(e.setAttribute("aria-controls",n),e.setAttribute("aria-haspopup","dialog")):e.setAttribute("aria-describedby",n)}),a.pipe(b(c=>!c)).subscribe(()=>{e.removeAttribute("aria-controls"),e.removeAttribute("aria-describedby"),e.removeAttribute("aria-haspopup")}),ka(e).pipe(E(c=>i.next(c)),L(()=>i.complete()),m(c=>R({ref:e},c)))})}function lt(e,{viewport$:t},r=document.body){return $a(e,{content$:new F(o=>{let n=e.title,i=yn(n);return o.next(i),e.removeAttribute("title"),r.append(i),()=>{i.remove(),e.setAttribute("title",n)}}),viewport$:t})}function Pa(e,t){let r=C(()=>z([Zo(e),De(t)])).pipe(m(([{x:o,y:n},i])=>{let{width:a,height:s}=ce(e);return{x:o-i.x+a/2,y:n-i.y+s/2}}));return et(e).pipe(v(o=>r.pipe(m(n=>({active:o,offset:n})),Te(+!o||1/0))))}function Ln(e,t,{target$:r}){let[o,n]=Array.from(e.children);return C(()=>{let i=new g,a=i.pipe(X(),ne(!0));return i.subscribe({next({offset:s}){e.style.setProperty("--md-tooltip-x",`${s.x}px`),e.style.setProperty("--md-tooltip-y",`${s.y}px`)},complete(){e.style.removeProperty("--md-tooltip-x"),e.style.removeProperty("--md-tooltip-y")}}),tt(e).pipe(U(a)).subscribe(s=>{e.toggleAttribute("data-md-visible",s)}),S(i.pipe(b(({active:s})=>s)),i.pipe(_e(250),b(({active:s})=>!s))).subscribe({next({active:s}){s?e.prepend(o):o.remove()},complete(){e.prepend(o)}}),i.pipe(Le(16,me)).subscribe(({active:s})=>{o.classList.toggle("md-tooltip--active",s)}),i.pipe(ct(125,me),b(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:s})=>s)).subscribe({next(s){s?e.style.setProperty("--md-tooltip-0",`${-s}px`):e.style.removeProperty("--md-tooltip-0")},complete(){e.style.removeProperty("--md-tooltip-0")}}),d(n,"click").pipe(U(a),b(s=>!(s.metaKey||s.ctrlKey))).subscribe(s=>{s.stopPropagation(),s.preventDefault()}),d(n,"mousedown").pipe(U(a),ee(i)).subscribe(([s,{active:p}])=>{var c;if(s.button!==0||s.metaKey||s.ctrlKey)s.preventDefault();else if(p){s.preventDefault();let l=e.parentElement.closest(".md-annotation");l instanceof HTMLElement?l.focus():(c=Re())==null||c.blur()}}),r.pipe(U(a),b(s=>s===o),Ge(125)).subscribe(()=>e.focus()),Pa(e,t).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))})}function Ra(e){return e.tagName==="CODE"?$(".c, .c1, .cm",e):[e]}function Ia(e){let t=[];for(let r of Ra(e)){let o=[],n=document.createNodeIterator(r,NodeFilter.SHOW_TEXT);for(let i=n.nextNode();i;i=n.nextNode())o.push(i);for(let i of o){let a;for(;a=/(\(\d+\))(!)?/.exec(i.textContent);){let[,s,p]=a;if(typeof p=="undefined"){let c=i.splitText(a.index);i=c.splitText(s.length),t.push(c)}else{i.textContent=s,t.push(i);break}}}}return t}function _n(e,t){t.append(...Array.from(e.childNodes))}function fr(e,t,{target$:r,print$:o}){let n=t.closest("[id]"),i=n==null?void 0:n.id,a=new Map;for(let s of 
Ia(t)){let[,p]=s.textContent.match(/\((\d+)\)/);fe(`:scope > li:nth-child(${p})`,e)&&(a.set(p,En(p,i)),s.replaceWith(a.get(p)))}return a.size===0?M:C(()=>{let s=new g,p=s.pipe(X(),ne(!0)),c=[];for(let[l,f]of a)c.push([P(".md-typeset",f),P(`:scope > li:nth-child(${l})`,e)]);return o.pipe(U(p)).subscribe(l=>{e.hidden=!l,e.classList.toggle("md-annotation-list",l);for(let[f,u]of c)l?_n(f,u):_n(u,f)}),S(...[...a].map(([,l])=>Ln(l,t,{target$:r}))).pipe(L(()=>s.complete()),pe())})}function An(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return An(t)}}function Cn(e,t){return C(()=>{let r=An(e);return typeof r!="undefined"?fr(r,e,t):M})}var Hn=Vt(Yr());var Fa=0;function kn(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return kn(t)}}function ja(e){return ge(e).pipe(m(({width:t})=>({scrollable:Tt(e).width>t})),Z("scrollable"))}function $n(e,t){let{matches:r}=matchMedia("(hover)"),o=C(()=>{let n=new g,i=n.pipe(Fr(1));n.subscribe(({scrollable:c})=>{c&&r?e.setAttribute("tabindex","0"):e.removeAttribute("tabindex")});let a=[];if(Hn.default.isSupported()&&(e.closest(".copy")||G("content.code.copy")&&!e.closest(".no-copy"))){let c=e.closest("pre");c.id=`__code_${Fa++}`;let l=wn(c.id);c.insertBefore(l,e),G("content.tooltips")&&a.push(lt(l,{viewport$}))}let s=e.closest(".highlight");if(s instanceof HTMLElement){let c=kn(s);if(typeof c!="undefined"&&(s.classList.contains("annotate")||G("content.code.annotate"))){let l=fr(c,e,t);a.push(ge(s).pipe(U(i),m(({width:f,height:u})=>f&&u),K(),v(f=>f?l:M)))}}return $(":scope > span[id]",e).length&&e.classList.add("md-code__content"),ja(e).pipe(E(c=>n.next(c)),L(()=>n.complete()),m(c=>R({ref:e},c)),Pe(...a))});return G("content.lazy")?tt(e).pipe(b(n=>n),Te(1),v(()=>o)):o}function Wa(e,{target$:t,print$:r}){let o=!0;return S(t.pipe(m(n=>n.closest("details:not([open])")),b(n=>e===n),m(()=>({action:"open",reveal:!0}))),r.pipe(b(n=>n||!o),E(()=>o=e.open),m(n=>({action:n?"open":"close"}))))}function Pn(e,t){return C(()=>{let r=new g;return r.subscribe(({action:o,reveal:n})=>{e.toggleAttribute("open",o==="open"),n&&e.scrollIntoView()}),Wa(e,t).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}var Rn=".node circle,.node ellipse,.node path,.node polygon,.node rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}marker{fill:var(--md-mermaid-edge-color)!important}.edgeLabel .label rect{fill:#0000}.label{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.label foreignObject{line-height:normal;overflow:visible}.label div .edgeLabel{color:var(--md-mermaid-label-fg-color)}.edgeLabel,.edgeLabel rect,.label div .edgeLabel{background-color:var(--md-mermaid-label-bg-color)}.edgeLabel,.edgeLabel rect{fill:var(--md-mermaid-label-bg-color);color:var(--md-mermaid-edge-color)}.edgePath .path,.flowchart-link{stroke:var(--md-mermaid-edge-color);stroke-width:.05rem}.edgePath .arrowheadPath{fill:var(--md-mermaid-edge-color);stroke:none}.cluster rect{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}.cluster span{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}g #flowchart-circleEnd,g #flowchart-circleStart,g #flowchart-crossEnd,g #flowchart-crossStart,g #flowchart-pointEnd,g #flowchart-pointStart{stroke:none}g.classGroup line,g.classGroup 
rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.classGroup text{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.classLabel .box{fill:var(--md-mermaid-label-bg-color);background-color:var(--md-mermaid-label-bg-color);opacity:1}.classLabel .label{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.node .divider{stroke:var(--md-mermaid-node-fg-color)}.relation{stroke:var(--md-mermaid-edge-color)}.cardinality{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.cardinality text{fill:inherit!important}defs #classDiagram-compositionEnd,defs #classDiagram-compositionStart,defs #classDiagram-dependencyEnd,defs #classDiagram-dependencyStart,defs #classDiagram-extensionEnd,defs #classDiagram-extensionStart{fill:var(--md-mermaid-edge-color)!important;stroke:var(--md-mermaid-edge-color)!important}defs #classDiagram-aggregationEnd,defs #classDiagram-aggregationStart{fill:var(--md-mermaid-label-bg-color)!important;stroke:var(--md-mermaid-edge-color)!important}g.stateGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.stateGroup .state-title{fill:var(--md-mermaid-label-fg-color)!important;font-family:var(--md-mermaid-font-family)}g.stateGroup .composit{fill:var(--md-mermaid-label-bg-color)}.nodeLabel,.nodeLabel p{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}a .nodeLabel{text-decoration:underline}.node circle.state-end,.node circle.state-start,.start-state{fill:var(--md-mermaid-edge-color);stroke:none}.end-state-inner,.end-state-outer{fill:var(--md-mermaid-edge-color)}.end-state-inner,.node circle.state-end{stroke:var(--md-mermaid-label-bg-color)}.transition{stroke:var(--md-mermaid-edge-color)}[id^=state-fork] rect,[id^=state-join] rect{fill:var(--md-mermaid-edge-color)!important;stroke:none!important}.statediagram-cluster.statediagram-cluster .inner{fill:var(--md-default-bg-color)}.statediagram-cluster rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.statediagram-state rect.divider{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}defs #statediagram-barbEnd{stroke:var(--md-mermaid-edge-color)}.attributeBoxEven,.attributeBoxOdd{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityBox{fill:var(--md-mermaid-label-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityLabel{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.relationshipLabelBox{fill:var(--md-mermaid-label-bg-color);fill-opacity:1;background-color:var(--md-mermaid-label-bg-color);opacity:1}.relationshipLabel{fill:var(--md-mermaid-label-fg-color)}.relationshipLine{stroke:var(--md-mermaid-edge-color)}defs #ONE_OR_MORE_END *,defs #ONE_OR_MORE_START *,defs #ONLY_ONE_END *,defs #ONLY_ONE_START *,defs #ZERO_OR_MORE_END *,defs #ZERO_OR_MORE_START *,defs #ZERO_OR_ONE_END *,defs #ZERO_OR_ONE_START *{stroke:var(--md-mermaid-edge-color)!important}defs #ZERO_OR_MORE_END circle,defs #ZERO_OR_MORE_START circle{fill:var(--md-mermaid-label-bg-color)}.actor{fill:var(--md-mermaid-sequence-actor-bg-color);stroke:var(--md-mermaid-sequence-actor-border-color)}text.actor>tspan{fill:var(--md-mermaid-sequence-actor-fg-color);font-family:var(--md-mermaid-font-family)}line{stroke:var(--md-mermaid-sequence-actor-line-color)}.actor-man circle,.actor-man 
line{fill:var(--md-mermaid-sequence-actorman-bg-color);stroke:var(--md-mermaid-sequence-actorman-line-color)}.messageLine0,.messageLine1{stroke:var(--md-mermaid-sequence-message-line-color)}.note{fill:var(--md-mermaid-sequence-note-bg-color);stroke:var(--md-mermaid-sequence-note-border-color)}.loopText,.loopText>tspan,.messageText,.noteText>tspan{stroke:none;font-family:var(--md-mermaid-font-family)!important}.messageText{fill:var(--md-mermaid-sequence-message-fg-color)}.loopText,.loopText>tspan{fill:var(--md-mermaid-sequence-loop-fg-color)}.noteText>tspan{fill:var(--md-mermaid-sequence-note-fg-color)}#arrowhead path{fill:var(--md-mermaid-sequence-message-line-color);stroke:none}.loopLine{fill:var(--md-mermaid-sequence-loop-bg-color);stroke:var(--md-mermaid-sequence-loop-border-color)}.labelBox{fill:var(--md-mermaid-sequence-label-bg-color);stroke:none}.labelText,.labelText>span{fill:var(--md-mermaid-sequence-label-fg-color);font-family:var(--md-mermaid-font-family)}.sequenceNumber{fill:var(--md-mermaid-sequence-number-fg-color)}rect.rect{fill:var(--md-mermaid-sequence-box-bg-color);stroke:none}rect.rect+text.text{fill:var(--md-mermaid-sequence-box-fg-color)}defs #sequencenumber{fill:var(--md-mermaid-sequence-number-bg-color)!important}";var Br,Da=0;function Va(){return typeof mermaid=="undefined"||mermaid instanceof Element?wt("https://unpkg.com/mermaid@10/dist/mermaid.min.js"):I(void 0)}function In(e){return e.classList.remove("mermaid"),Br||(Br=Va().pipe(E(()=>mermaid.initialize({startOnLoad:!1,themeCSS:Rn,sequence:{actorFontSize:"16px",messageFontSize:"16px",noteFontSize:"16px"}})),m(()=>{}),B(1))),Br.subscribe(()=>ao(this,null,function*(){e.classList.add("mermaid");let t=`__mermaid_${Da++}`,r=x("div",{class:"mermaid"}),o=e.textContent,{svg:n,fn:i}=yield mermaid.render(t,o),a=r.attachShadow({mode:"closed"});a.innerHTML=n,e.replaceWith(r),i==null||i(a)})),Br.pipe(m(()=>({ref:e})))}var Fn=x("table");function jn(e){return e.replaceWith(Fn),Fn.replaceWith(On(e)),I({ref:e})}function Na(e){let t=e.find(r=>r.checked)||e[0];return S(...e.map(r=>d(r,"change").pipe(m(()=>P(`label[for="${r.id}"]`))))).pipe(Q(P(`label[for="${t.id}"]`)),m(r=>({active:r})))}function Wn(e,{viewport$:t,target$:r}){let o=P(".tabbed-labels",e),n=$(":scope > input",e),i=Qr("prev");e.append(i);let a=Qr("next");return e.append(a),C(()=>{let s=new g,p=s.pipe(X(),ne(!0));z([s,ge(e),tt(e)]).pipe(U(p),Le(1,me)).subscribe({next([{active:c},l]){let f=Ue(c),{width:u}=ce(c);e.style.setProperty("--md-indicator-x",`${f.x}px`),e.style.setProperty("--md-indicator-width",`${u}px`);let h=pr(o);(f.xh.x+l.width)&&o.scrollTo({left:Math.max(0,f.x-16),behavior:"smooth"})},complete(){e.style.removeProperty("--md-indicator-x"),e.style.removeProperty("--md-indicator-width")}}),z([De(o),ge(o)]).pipe(U(p)).subscribe(([c,l])=>{let f=Tt(o);i.hidden=c.x<16,a.hidden=c.x>f.width-l.width-16}),S(d(i,"click").pipe(m(()=>-1)),d(a,"click").pipe(m(()=>1))).pipe(U(p)).subscribe(c=>{let{width:l}=ce(o);o.scrollBy({left:l*c,behavior:"smooth"})}),r.pipe(U(p),b(c=>n.includes(c))).subscribe(c=>c.click()),o.classList.add("tabbed-labels--linked");for(let c of n){let l=P(`label[for="${c.id}"]`);l.replaceChildren(x("a",{href:`#${l.htmlFor}`,tabIndex:-1},...Array.from(l.childNodes))),d(l.firstElementChild,"click").pipe(U(p),b(f=>!(f.metaKey||f.ctrlKey)),E(f=>{f.preventDefault(),f.stopPropagation()})).subscribe(()=>{history.replaceState({},"",`#${l.htmlFor}`),l.click()})}return G("content.tabs.link")&&s.pipe(Ce(1),ee(t)).subscribe(([{active:c},{offset:l}])=>{let 
f=c.innerText.trim();if(c.hasAttribute("data-md-switching"))c.removeAttribute("data-md-switching");else{let u=e.offsetTop-l.y;for(let w of $("[data-tabs]"))for(let A of $(":scope > input",w)){let te=P(`label[for="${A.id}"]`);if(te!==c&&te.innerText.trim()===f){te.setAttribute("data-md-switching",""),A.click();break}}window.scrollTo({top:e.offsetTop-u});let h=__md_get("__tabs")||[];__md_set("__tabs",[...new Set([f,...h])])}}),s.pipe(U(p)).subscribe(()=>{for(let c of $("audio, video",e))c.pause()}),Na(n).pipe(E(c=>s.next(c)),L(()=>s.complete()),m(c=>R({ref:e},c)))}).pipe(Qe(se))}function Un(e,{viewport$:t,target$:r,print$:o}){return S(...$(".annotate:not(.highlight)",e).map(n=>Cn(n,{target$:r,print$:o})),...$("pre:not(.mermaid) > code",e).map(n=>$n(n,{target$:r,print$:o})),...$("pre.mermaid",e).map(n=>In(n)),...$("table:not([class])",e).map(n=>jn(n)),...$("details",e).map(n=>Pn(n,{target$:r,print$:o})),...$("[data-tabs]",e).map(n=>Wn(n,{viewport$:t,target$:r})),...$("[title]",e).filter(()=>G("content.tooltips")).map(n=>lt(n,{viewport$:t})))}function za(e,{alert$:t}){return t.pipe(v(r=>S(I(!0),I(!1).pipe(Ge(2e3))).pipe(m(o=>({message:r,active:o})))))}function Dn(e,t){let r=P(".md-typeset",e);return C(()=>{let o=new g;return o.subscribe(({message:n,active:i})=>{e.classList.toggle("md-dialog--active",i),r.textContent=n}),za(e,t).pipe(E(n=>o.next(n)),L(()=>o.complete()),m(n=>R({ref:e},n)))})}var qa=0;function Qa(e,t){document.body.append(e);let{width:r}=ce(e);e.style.setProperty("--md-tooltip-width",`${r}px`),e.remove();let o=cr(t),n=typeof o!="undefined"?De(o):I({x:0,y:0}),i=S(et(t),kt(t)).pipe(K());return z([i,n]).pipe(m(([a,s])=>{let{x:p,y:c}=Ue(t),l=ce(t),f=t.closest("table");return f&&t.parentElement&&(p+=f.offsetLeft+t.parentElement.offsetLeft,c+=f.offsetTop+t.parentElement.offsetTop),{active:a,offset:{x:p-s.x+l.width/2-r/2,y:c-s.y+l.height+8}}}))}function Vn(e){let t=e.title;if(!t.length)return M;let r=`__tooltip_${qa++}`,o=Pt(r,"inline"),n=P(".md-typeset",o);return n.innerHTML=t,C(()=>{let i=new g;return i.subscribe({next({offset:a}){o.style.setProperty("--md-tooltip-x",`${a.x}px`),o.style.setProperty("--md-tooltip-y",`${a.y}px`)},complete(){o.style.removeProperty("--md-tooltip-x"),o.style.removeProperty("--md-tooltip-y")}}),S(i.pipe(b(({active:a})=>a)),i.pipe(_e(250),b(({active:a})=>!a))).subscribe({next({active:a}){a?(e.insertAdjacentElement("afterend",o),e.setAttribute("aria-describedby",r),e.removeAttribute("title")):(o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t))},complete(){o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t)}}),i.pipe(Le(16,me)).subscribe(({active:a})=>{o.classList.toggle("md-tooltip--active",a)}),i.pipe(ct(125,me),b(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:a})=>a)).subscribe({next(a){a?o.style.setProperty("--md-tooltip-0",`${-a}px`):o.style.removeProperty("--md-tooltip-0")},complete(){o.style.removeProperty("--md-tooltip-0")}}),Qa(o,e).pipe(E(a=>i.next(a)),L(()=>i.complete()),m(a=>R({ref:e},a)))}).pipe(Qe(se))}function Ka({viewport$:e}){if(!G("header.autohide"))return I(!1);let t=e.pipe(m(({offset:{y:n}})=>n),Ye(2,1),m(([n,i])=>[nMath.abs(i-n.y)>100),m(([,[n]])=>n),K()),o=Ve("search");return z([e,o]).pipe(m(([{offset:n},i])=>n.y>400&&!i),K(),v(n=>n?r:I(!1)),Q(!1))}function Nn(e,t){return C(()=>z([ge(e),Ka(t)])).pipe(m(([{height:r},o])=>({height:r,hidden:o})),K((r,o)=>r.height===o.height&&r.hidden===o.hidden),B(1))}function zn(e,{header$:t,main$:r}){return C(()=>{let o=new 
g,n=o.pipe(X(),ne(!0));o.pipe(Z("active"),We(t)).subscribe(([{active:a},{hidden:s}])=>{e.classList.toggle("md-header--shadow",a&&!s),e.hidden=s});let i=ue($("[title]",e)).pipe(b(()=>G("content.tooltips")),oe(a=>Vn(a)));return r.subscribe(o),t.pipe(U(n),m(a=>R({ref:e},a)),Pe(i.pipe(U(n))))})}function Ya(e,{viewport$:t,header$:r}){return mr(e,{viewport$:t,header$:r}).pipe(m(({offset:{y:o}})=>{let{height:n}=ce(e);return{active:o>=n}}),Z("active"))}function qn(e,t){return C(()=>{let r=new g;r.subscribe({next({active:n}){e.classList.toggle("md-header__title--active",n)},complete(){e.classList.remove("md-header__title--active")}});let o=fe(".md-content h1");return typeof o=="undefined"?M:Ya(o,t).pipe(E(n=>r.next(n)),L(()=>r.complete()),m(n=>R({ref:e},n)))})}function Qn(e,{viewport$:t,header$:r}){let o=r.pipe(m(({height:i})=>i),K()),n=o.pipe(v(()=>ge(e).pipe(m(({height:i})=>({top:e.offsetTop,bottom:e.offsetTop+i})),Z("bottom"))));return z([o,n,t]).pipe(m(([i,{top:a,bottom:s},{offset:{y:p},size:{height:c}}])=>(c=Math.max(0,c-Math.max(0,a-p,i)-Math.max(0,c+p-s)),{offset:a-i,height:c,active:a-i<=p})),K((i,a)=>i.offset===a.offset&&i.height===a.height&&i.active===a.active))}function Ba(e){let t=__md_get("__palette")||{index:e.findIndex(o=>matchMedia(o.getAttribute("data-md-color-media")).matches)},r=Math.max(0,Math.min(t.index,e.length-1));return I(...e).pipe(oe(o=>d(o,"change").pipe(m(()=>o))),Q(e[r]),m(o=>({index:e.indexOf(o),color:{media:o.getAttribute("data-md-color-media"),scheme:o.getAttribute("data-md-color-scheme"),primary:o.getAttribute("data-md-color-primary"),accent:o.getAttribute("data-md-color-accent")}})),B(1))}function Kn(e){let t=$("input",e),r=x("meta",{name:"theme-color"});document.head.appendChild(r);let o=x("meta",{name:"color-scheme"});document.head.appendChild(o);let n=$t("(prefers-color-scheme: light)");return C(()=>{let i=new g;return i.subscribe(a=>{if(document.body.setAttribute("data-md-color-switching",""),a.color.media==="(prefers-color-scheme)"){let s=matchMedia("(prefers-color-scheme: light)"),p=document.querySelector(s.matches?"[data-md-color-media='(prefers-color-scheme: light)']":"[data-md-color-media='(prefers-color-scheme: dark)']");a.color.scheme=p.getAttribute("data-md-color-scheme"),a.color.primary=p.getAttribute("data-md-color-primary"),a.color.accent=p.getAttribute("data-md-color-accent")}for(let[s,p]of Object.entries(a.color))document.body.setAttribute(`data-md-color-${s}`,p);for(let s=0;s<t.length;s++){let p=t[s].nextElementSibling;p instanceof HTMLElement&&(p.hidden=a.index!==s)}__md_set("__palette",a)}),d(e,"keydown").pipe(b(a=>a.key==="Enter"),ee(i,(a,s)=>s)).subscribe(({index:a})=>{a=(a+1)%t.length,t[a].click(),t[a].focus()}),i.pipe(m(()=>{let a=Se("header"),s=window.getComputedStyle(a);return o.content=s.colorScheme,s.backgroundColor.match(/\d+/g).map(p=>(+p).toString(16).padStart(2,"0")).join("")})).subscribe(a=>r.content=`#${a}`),i.pipe(be(se)).subscribe(()=>{document.body.removeAttribute("data-md-color-switching")}),Ba(t).pipe(U(n.pipe(Ce(1))),st(),E(a=>i.next(a)),L(()=>i.complete()),m(a=>R({ref:e},a)))})}function Yn(e,{progress$:t}){return C(()=>{let r=new g;return r.subscribe(({value:o})=>{e.style.setProperty("--md-progress-value",`${o}`)}),t.pipe(E(o=>r.next({value:o})),L(()=>r.complete()),m(o=>({ref:e,value:o})))})}var Gr=Vt(Yr());function Ga(e){e.setAttribute("data-md-copying","");let t=e.closest("[data-copy]"),r=t?t.getAttribute("data-copy"):e.innerText;return e.removeAttribute("data-md-copying"),r.trimEnd()}function Bn({alert$:e}){Gr.default.isSupported()&&new F(t=>{new Gr.default("[data-clipboard-target],
[data-clipboard-text]",{text:r=>r.getAttribute("data-clipboard-text")||Ga(P(r.getAttribute("data-clipboard-target")))}).on("success",r=>t.next(r))}).pipe(E(t=>{t.trigger.focus()}),m(()=>Ee("clipboard.copied"))).subscribe(e)}function Gn(e,t){return e.protocol=t.protocol,e.hostname=t.hostname,e}function Ja(e,t){let r=new Map;for(let o of $("url",e)){let n=P("loc",o),i=[Gn(new URL(n.textContent),t)];r.set(`${i[0]}`,i);for(let a of $("[rel=alternate]",o)){let s=a.getAttribute("href");s!=null&&i.push(Gn(new URL(s),t))}}return r}function ur(e){return mn(new URL("sitemap.xml",e)).pipe(m(t=>Ja(t,new URL(e))),ve(()=>I(new Map)))}function Xa(e,t){if(!(e.target instanceof Element))return M;let r=e.target.closest("a");if(r===null)return M;if(r.target||e.metaKey||e.ctrlKey)return M;let o=new URL(r.href);return o.search=o.hash="",t.has(`${o}`)?(e.preventDefault(),I(new URL(r.href))):M}function Jn(e){let t=new Map;for(let r of $(":scope > *",e.head))t.set(r.outerHTML,r);return t}function Xn(e){for(let t of $("[href], [src]",e))for(let r of["href","src"]){let o=t.getAttribute(r);if(o&&!/^(?:[a-z]+:)?\/\//i.test(o)){t[r]=t[r];break}}return I(e)}function Za(e){for(let o of["[data-md-component=announce]","[data-md-component=container]","[data-md-component=header-topic]","[data-md-component=outdated]","[data-md-component=logo]","[data-md-component=skip]",...G("navigation.tabs.sticky")?["[data-md-component=tabs]"]:[]]){let n=fe(o),i=fe(o,e);typeof n!="undefined"&&typeof i!="undefined"&&n.replaceWith(i)}let t=Jn(document);for(let[o,n]of Jn(e))t.has(o)?t.delete(o):document.head.appendChild(n);for(let o of t.values()){let n=o.getAttribute("name");n!=="theme-color"&&n!=="color-scheme"&&o.remove()}let r=Se("container");return je($("script",r)).pipe(v(o=>{let n=e.createElement("script");if(o.src){for(let i of o.getAttributeNames())n.setAttribute(i,o.getAttribute(i));return o.replaceWith(n),new F(i=>{n.onload=()=>i.complete()})}else return n.textContent=o.textContent,o.replaceWith(n),M}),X(),ne(document))}function Zn({location$:e,viewport$:t,progress$:r}){let o=ye();if(location.protocol==="file:")return M;let n=ur(o.base);I(document).subscribe(Xn);let i=d(document.body,"click").pipe(We(n),v(([p,c])=>Xa(p,c)),pe()),a=d(window,"popstate").pipe(m(xe),pe());i.pipe(ee(t)).subscribe(([p,{offset:c}])=>{history.replaceState(c,""),history.pushState(null,"",p)}),S(i,a).subscribe(e);let s=e.pipe(Z("pathname"),v(p=>ln(p,{progress$:r}).pipe(ve(()=>(pt(p,!0),M)))),v(Xn),v(Za),pe());return S(s.pipe(ee(e,(p,c)=>c)),s.pipe(v(()=>e),Z("pathname"),v(()=>e),Z("hash")),e.pipe(K((p,c)=>p.pathname===c.pathname&&p.hash===c.hash),v(()=>i),E(()=>history.back()))).subscribe(p=>{var c,l;history.state!==null||!p.hash?window.scrollTo(0,(l=(c=history.state)==null?void 0:c.y)!=null?l:0):(history.scrollRestoration="auto",sn(p.hash),history.scrollRestoration="manual")}),e.subscribe(()=>{history.scrollRestoration="manual"}),d(window,"beforeunload").subscribe(()=>{history.scrollRestoration="auto"}),t.pipe(Z("offset"),_e(100)).subscribe(({offset:p})=>{history.replaceState(p,"")}),s}var ri=Vt(ti());function oi(e){let t=e.separator.split("|").map(n=>n.replace(/(\(\?[!=<][^)]+\))/g,"").length===0?"\uFFFD":n).join("|"),r=new RegExp(t,"img"),o=(n,i,a)=>`${i}${a}`;return n=>{n=n.replace(/[\s*+\-:~^]+/g," ").trim();let i=new RegExp(`(^|${e.separator}|)(${n.replace(/[|\\{}()[\]^$+*?.-]/g,"\\$&").replace(r,"|")})`,"img");return a=>(0,ri.default)(a).replace(i,o).replace(/<\/mark>(\s+)]*>/img,"$1")}}function It(e){return e.type===1}function dr(e){return 
e.type===3}function ni(e,t){let r=vn(e);return S(I(location.protocol!=="file:"),Ve("search")).pipe(Ae(o=>o),v(()=>t)).subscribe(({config:o,docs:n})=>r.next({type:0,data:{config:o,docs:n,options:{suggest:G("search.suggest")}}})),r}function ii({document$:e}){let t=ye(),r=Ne(new URL("../versions.json",t.base)).pipe(ve(()=>M)),o=r.pipe(m(n=>{let[,i]=t.base.match(/([^/]+)\/?$/);return n.find(({version:a,aliases:s})=>a===i||s.includes(i))||n[0]}));r.pipe(m(n=>new Map(n.map(i=>[`${new URL(`../${i.version}/`,t.base)}`,i]))),v(n=>d(document.body,"click").pipe(b(i=>!i.metaKey&&!i.ctrlKey),ee(o),v(([i,a])=>{if(i.target instanceof Element){let s=i.target.closest("a");if(s&&!s.target&&n.has(s.href)){let p=s.href;return!i.target.closest(".md-version")&&n.get(p)===a?M:(i.preventDefault(),I(p))}}return M}),v(i=>ur(new URL(i)).pipe(m(a=>{let p=xe().href.replace(t.base,i);return a.has(p.split("#")[0])?new URL(p):new URL(i)})))))).subscribe(n=>pt(n,!0)),z([r,o]).subscribe(([n,i])=>{P(".md-header__topic").appendChild(Mn(n,i))}),e.pipe(v(()=>o)).subscribe(n=>{var a;let i=__md_get("__outdated",sessionStorage);if(i===null){i=!0;let s=((a=t.version)==null?void 0:a.default)||"latest";Array.isArray(s)||(s=[s]);e:for(let p of s)for(let c of n.aliases.concat(n.version))if(new RegExp(p,"i").test(c)){i=!1;break e}__md_set("__outdated",i,sessionStorage)}if(i)for(let s of ae("outdated"))s.hidden=!1})}function ns(e,{worker$:t}){let{searchParams:r}=xe();r.has("q")&&(Je("search",!0),e.value=r.get("q"),e.focus(),Ve("search").pipe(Ae(i=>!i)).subscribe(()=>{let i=xe();i.searchParams.delete("q"),history.replaceState({},"",`${i}`)}));let o=et(e),n=S(t.pipe(Ae(It)),d(e,"keyup"),o).pipe(m(()=>e.value),K());return z([n,o]).pipe(m(([i,a])=>({value:i,focus:a})),B(1))}function ai(e,{worker$:t}){let r=new g,o=r.pipe(X(),ne(!0));z([t.pipe(Ae(It)),r],(i,a)=>a).pipe(Z("value")).subscribe(({value:i})=>t.next({type:2,data:i})),r.pipe(Z("focus")).subscribe(({focus:i})=>{i&&Je("search",i)}),d(e.form,"reset").pipe(U(o)).subscribe(()=>e.focus());let n=P("header [for=__search]");return d(n,"click").subscribe(()=>e.focus()),ns(e,{worker$:t}).pipe(E(i=>r.next(i)),L(()=>r.complete()),m(i=>R({ref:e},i)),B(1))}function si(e,{worker$:t,query$:r}){let o=new g,n=tn(e.parentElement).pipe(b(Boolean)),i=e.parentElement,a=P(":scope > :first-child",e),s=P(":scope > :last-child",e);Ve("search").subscribe(l=>s.setAttribute("role",l?"list":"presentation")),o.pipe(ee(r),Ur(t.pipe(Ae(It)))).subscribe(([{items:l},{value:f}])=>{switch(l.length){case 0:a.textContent=f.length?Ee("search.result.none"):Ee("search.result.placeholder");break;case 1:a.textContent=Ee("search.result.one");break;default:let u=sr(l.length);a.textContent=Ee("search.result.other",u)}});let p=o.pipe(E(()=>s.innerHTML=""),v(({items:l})=>S(I(...l.slice(0,10)),I(...l.slice(10)).pipe(Ye(4),Vr(n),v(([f])=>f)))),m(Tn),pe());return p.subscribe(l=>s.appendChild(l)),p.pipe(oe(l=>{let f=fe("details",l);return typeof f=="undefined"?M:d(f,"toggle").pipe(U(o),m(()=>f))})).subscribe(l=>{l.open===!1&&l.offsetTop<=i.scrollTop&&i.scrollTo({top:l.offsetTop})}),t.pipe(b(dr),m(({data:l})=>l)).pipe(E(l=>o.next(l)),L(()=>o.complete()),m(l=>R({ref:e},l)))}function is(e,{query$:t}){return t.pipe(m(({value:r})=>{let o=xe();return o.hash="",r=r.replace(/\s+/g,"+").replace(/&/g,"%26").replace(/=/g,"%3D"),o.search=`q=${r}`,{url:o}}))}function ci(e,t){let r=new g,o=r.pipe(X(),ne(!0));return 
r.subscribe(({url:n})=>{e.setAttribute("data-clipboard-text",e.href),e.href=`${n}`}),d(e,"click").pipe(U(o)).subscribe(n=>n.preventDefault()),is(e,t).pipe(E(n=>r.next(n)),L(()=>r.complete()),m(n=>R({ref:e},n)))}function pi(e,{worker$:t,keyboard$:r}){let o=new g,n=Se("search-query"),i=S(d(n,"keydown"),d(n,"focus")).pipe(be(se),m(()=>n.value),K());return o.pipe(We(i),m(([{suggest:s},p])=>{let c=p.split(/([\s-]+)/);if(s!=null&&s.length&&c[c.length-1]){let l=s[s.length-1];l.startsWith(c[c.length-1])&&(c[c.length-1]=l)}else c.length=0;return c})).subscribe(s=>e.innerHTML=s.join("").replace(/\s/g," ")),r.pipe(b(({mode:s})=>s==="search")).subscribe(s=>{switch(s.type){case"ArrowRight":e.innerText.length&&n.selectionStart===n.value.length&&(n.value=e.innerText);break}}),t.pipe(b(dr),m(({data:s})=>s)).pipe(E(s=>o.next(s)),L(()=>o.complete()),m(()=>({ref:e})))}function li(e,{index$:t,keyboard$:r}){let o=ye();try{let n=ni(o.search,t),i=Se("search-query",e),a=Se("search-result",e);d(e,"click").pipe(b(({target:p})=>p instanceof Element&&!!p.closest("a"))).subscribe(()=>Je("search",!1)),r.pipe(b(({mode:p})=>p==="search")).subscribe(p=>{let c=Re();switch(p.type){case"Enter":if(c===i){let l=new Map;for(let f of $(":first-child [href]",a)){let u=f.firstElementChild;l.set(f,parseFloat(u.getAttribute("data-md-score")))}if(l.size){let[[f]]=[...l].sort(([,u],[,h])=>h-u);f.click()}p.claim()}break;case"Escape":case"Tab":Je("search",!1),i.blur();break;case"ArrowUp":case"ArrowDown":if(typeof c=="undefined")i.focus();else{let l=[i,...$(":not(details) > [href], summary, details[open] [href]",a)],f=Math.max(0,(Math.max(0,l.indexOf(c))+l.length+(p.type==="ArrowUp"?-1:1))%l.length);l[f].focus()}p.claim();break;default:i!==Re()&&i.focus()}}),r.pipe(b(({mode:p})=>p==="global")).subscribe(p=>{switch(p.type){case"f":case"s":case"/":i.focus(),i.select(),p.claim();break}});let s=ai(i,{worker$:n});return S(s,si(a,{worker$:n,query$:s})).pipe(Pe(...ae("search-share",e).map(p=>ci(p,{query$:s})),...ae("search-suggest",e).map(p=>pi(p,{worker$:n,keyboard$:r}))))}catch(n){return e.hidden=!0,Ke}}function mi(e,{index$:t,location$:r}){return z([t,r.pipe(Q(xe()),b(o=>!!o.searchParams.get("h")))]).pipe(m(([o,n])=>oi(o.config)(n.searchParams.get("h"))),m(o=>{var a;let n=new Map,i=document.createNodeIterator(e,NodeFilter.SHOW_TEXT);for(let s=i.nextNode();s;s=i.nextNode())if((a=s.parentElement)!=null&&a.offsetHeight){let p=s.textContent,c=o(p);c.length>p.length&&n.set(s,c)}for(let[s,p]of n){let{childNodes:c}=x("span",null,p);s.replaceWith(...Array.from(c))}return{ref:e,nodes:n}}))}function as(e,{viewport$:t,main$:r}){let o=e.closest(".md-grid"),n=o.offsetTop-o.parentElement.offsetTop;return z([r,t]).pipe(m(([{offset:i,height:a},{offset:{y:s}}])=>(a=a+Math.min(n,Math.max(0,s-i))-n,{height:a,locked:s>=i+n})),K((i,a)=>i.height===a.height&&i.locked===a.locked))}function Jr(e,o){var n=o,{header$:t}=n,r=io(n,["header$"]);let i=P(".md-sidebar__scrollwrap",e),{y:a}=Ue(i);return C(()=>{let s=new g,p=s.pipe(X(),ne(!0)),c=s.pipe(Le(0,me));return c.pipe(ee(t)).subscribe({next([{height:l},{height:f}]){i.style.height=`${l-2*a}px`,e.style.top=`${f}px`},complete(){i.style.height="",e.style.top=""}}),c.pipe(Ae()).subscribe(()=>{for(let l of $(".md-nav__link--active[href]",e)){if(!l.clientHeight)continue;let f=l.closest(".md-sidebar__scrollwrap");if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:h}=ce(f);f.scrollTo({top:u-h/2})}}}),ue($("label[tabindex]",e)).pipe(oe(l=>d(l,"click").pipe(be(se),m(()=>l),U(p)))).subscribe(l=>{let 
f=P(`[id="${l.htmlFor}"]`);P(`[aria-labelledby="${l.id}"]`).setAttribute("aria-expanded",`${f.checked}`)}),as(e,r).pipe(E(l=>s.next(l)),L(()=>s.complete()),m(l=>R({ref:e},l)))})}function fi(e,t){if(typeof t!="undefined"){let r=`https://api.github.com/repos/${e}/${t}`;return Ct(Ne(`${r}/releases/latest`).pipe(ve(()=>M),m(o=>({version:o.tag_name})),Be({})),Ne(r).pipe(ve(()=>M),m(o=>({stars:o.stargazers_count,forks:o.forks_count})),Be({}))).pipe(m(([o,n])=>R(R({},o),n)))}else{let r=`https://api.github.com/users/${e}`;return Ne(r).pipe(m(o=>({repositories:o.public_repos})),Be({}))}}function ui(e,t){let r=`https://${e}/api/v4/projects/${encodeURIComponent(t)}`;return Ne(r).pipe(ve(()=>M),m(({star_count:o,forks_count:n})=>({stars:o,forks:n})),Be({}))}function di(e){let t=e.match(/^.+github\.com\/([^/]+)\/?([^/]+)?/i);if(t){let[,r,o]=t;return fi(r,o)}if(t=e.match(/^.+?([^/]*gitlab[^/]+)\/(.+?)\/?$/i),t){let[,r,o]=t;return ui(r,o)}return M}var ss;function cs(e){return ss||(ss=C(()=>{let t=__md_get("__source",sessionStorage);if(t)return I(t);if(ae("consent").length){let o=__md_get("__consent");if(!(o&&o.github))return M}return di(e.href).pipe(E(o=>__md_set("__source",o,sessionStorage)))}).pipe(ve(()=>M),b(t=>Object.keys(t).length>0),m(t=>({facts:t})),B(1)))}function hi(e){let t=P(":scope > :last-child",e);return C(()=>{let r=new g;return r.subscribe(({facts:o})=>{t.appendChild(Sn(o)),t.classList.add("md-source__repository--active")}),cs(e).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}function ps(e,{viewport$:t,header$:r}){return ge(document.body).pipe(v(()=>mr(e,{header$:r,viewport$:t})),m(({offset:{y:o}})=>({hidden:o>=10})),Z("hidden"))}function bi(e,t){return C(()=>{let r=new g;return r.subscribe({next({hidden:o}){e.hidden=o},complete(){e.hidden=!1}}),(G("navigation.tabs.sticky")?I({hidden:!1}):ps(e,t)).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}function ls(e,{viewport$:t,header$:r}){let o=new Map,n=$(".md-nav__link",e);for(let s of n){let p=decodeURIComponent(s.hash.substring(1)),c=fe(`[id="${p}"]`);typeof c!="undefined"&&o.set(s,c)}let i=r.pipe(Z("height"),m(({height:s})=>{let p=Se("main"),c=P(":scope > :first-child",p);return s+.8*(c.offsetTop-p.offsetTop)}),pe());return ge(document.body).pipe(Z("height"),v(s=>C(()=>{let p=[];return I([...o].reduce((c,[l,f])=>{for(;p.length&&o.get(p[p.length-1]).tagName>=f.tagName;)p.pop();let u=f.offsetTop;for(;!u&&f.parentElement;)f=f.parentElement,u=f.offsetTop;let h=f.offsetParent;for(;h;h=h.offsetParent)u+=h.offsetTop;return c.set([...p=[...p,l]].reverse(),u)},new Map))}).pipe(m(p=>new Map([...p].sort(([,c],[,l])=>c-l))),We(i),v(([p,c])=>t.pipe(jr(([l,f],{offset:{y:u},size:h})=>{let w=u+h.height>=Math.floor(s.height);for(;f.length;){let[,A]=f[0];if(A-c<u||w)l=[...l,f.shift()];else break}for(;l.length;){let[,A]=l[l.length-1];if(A-c>=u&&!w)f=[l.pop(),...f];else break}return[l,f]},[[],[...p]]),K((l,f)=>l[0]===f[0]&&l[1]===f[1])))))).pipe(m(([s,p])=>({prev:s.map(([c])=>c),next:p.map(([c])=>c)})),Q({prev:[],next:[]}),Ye(2,1),m(([s,p])=>s.prev.length<p.prev.length?{prev:p.prev.slice(Math.max(0,s.prev.length-1),p.prev.length),next:[]}:{prev:p.prev.slice(-1),next:p.next.slice(0,-1)}))}function vi(e,{viewport$:t,header$:r,main$:o,target$:n}){return C(()=>{let i=new g,a=i.pipe(X(),ne(!0));if(i.subscribe(({prev:s,next:p})=>{for(let[c]of p)c.classList.remove("md-nav__link--passed"),c.classList.remove("md-nav__link--active");for(let[c,[l]]of s.entries())l.classList.add("md-nav__link--passed"),l.classList.toggle("md-nav__link--active",c===s.length-1)}),G("toc.follow")){let s=S(t.pipe(_e(1),m(()=>{})),t.pipe(_e(250),m(()=>"smooth")));i.pipe(b(({prev:p})=>p.length>0),We(o.pipe(be(se))),ee(s)).subscribe(([[{prev:p}],c])=>{let[l]=p[p.length-1];if(l.offsetHeight){let f=cr(l);if(typeof f!="undefined"){let
u=l.offsetTop-f.offsetTop,{height:h}=ce(f);f.scrollTo({top:u-h/2,behavior:c})}}})}return G("navigation.tracking")&&t.pipe(U(a),Z("offset"),_e(250),Ce(1),U(n.pipe(Ce(1))),st({delay:250}),ee(i)).subscribe(([,{prev:s}])=>{let p=xe(),c=s[s.length-1];if(c&&c.length){let[l]=c,{hash:f}=new URL(l.href);p.hash!==f&&(p.hash=f,history.replaceState({},"",`${p}`))}else p.hash="",history.replaceState({},"",`${p}`)}),ls(e,{viewport$:t,header$:r}).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))})}function ms(e,{viewport$:t,main$:r,target$:o}){let n=t.pipe(m(({offset:{y:a}})=>a),Ye(2,1),m(([a,s])=>a>s&&s>0),K()),i=r.pipe(m(({active:a})=>a));return z([i,n]).pipe(m(([a,s])=>!(a&&s)),K(),U(o.pipe(Ce(1))),ne(!0),st({delay:250}),m(a=>({hidden:a})))}function gi(e,{viewport$:t,header$:r,main$:o,target$:n}){let i=new g,a=i.pipe(X(),ne(!0));return i.subscribe({next({hidden:s}){e.hidden=s,s?(e.setAttribute("tabindex","-1"),e.blur()):e.removeAttribute("tabindex")},complete(){e.style.top="",e.hidden=!0,e.removeAttribute("tabindex")}}),r.pipe(U(a),Z("height")).subscribe(({height:s})=>{e.style.top=`${s+16}px`}),d(e,"click").subscribe(s=>{s.preventDefault(),window.scrollTo({top:0})}),ms(e,{viewport$:t,main$:o,target$:n}).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))}function xi({document$:e,viewport$:t}){e.pipe(v(()=>$(".md-ellipsis")),oe(r=>tt(r).pipe(U(e.pipe(Ce(1))),b(o=>o),m(()=>r),Te(1))),b(r=>r.offsetWidth<r.scrollWidth),oe(r=>{let o=r.innerText,n=r.closest("a")||r;return n.title=o,lt(n,{viewport$:t}).pipe(U(e.pipe(Ce(1))),L(()=>n.removeAttribute("title")))})).subscribe(),e.pipe(v(()=>$(".md-status")),oe(r=>lt(r,{viewport$:t}))).subscribe()}function yi({document$:e,tablet$:t}){e.pipe(v(()=>$(".md-toggle--indeterminate")),E(r=>{r.indeterminate=!0,r.checked=!1}),oe(r=>d(r,"change").pipe(Dr(()=>r.classList.contains("md-toggle--indeterminate")),m(()=>r))),ee(t)).subscribe(([r,o])=>{r.classList.remove("md-toggle--indeterminate"),o&&(r.checked=!1)})}function fs(){return/(iPad|iPhone|iPod)/.test(navigator.userAgent)}function Ei({document$:e}){e.pipe(v(()=>$("[data-md-scrollfix]")),E(t=>t.removeAttribute("data-md-scrollfix")),b(fs),oe(t=>d(t,"touchstart").pipe(m(()=>t)))).subscribe(t=>{let r=t.scrollTop;r===0?t.scrollTop=1:r+t.offsetHeight===t.scrollHeight&&(t.scrollTop=r-1)})}function wi({viewport$:e,tablet$:t}){z([Ve("search"),t]).pipe(m(([r,o])=>r&&!o),v(r=>I(r).pipe(Ge(r?400:100))),ee(e)).subscribe(([r,{offset:{y:o}}])=>{if(r)document.body.setAttribute("data-md-scrolllock",""),document.body.style.top=`-${o}px`;else{let n=-1*parseInt(document.body.style.top,10);document.body.removeAttribute("data-md-scrolllock"),document.body.style.top="",n&&window.scrollTo(0,n)}})}Object.entries||(Object.entries=function(e){let t=[];for(let r of Object.keys(e))t.push([r,e[r]]);return t});Object.values||(Object.values=function(e){let t=[];for(let r of Object.keys(e))t.push(e[r]);return t});typeof Element!="undefined"&&(Element.prototype.scrollTo||(Element.prototype.scrollTo=function(e,t){typeof e=="object"?(this.scrollLeft=e.left,this.scrollTop=e.top):(this.scrollLeft=e,this.scrollTop=t)}),Element.prototype.replaceWith||(Element.prototype.replaceWith=function(...e){let t=this.parentNode;if(t){e.length===0&&t.removeChild(this);for(let r=e.length-1;r>=0;r--){let o=e[r];typeof o=="string"?o=document.createTextNode(o):o.parentNode&&o.parentNode.removeChild(o),r?t.insertBefore(this.previousSibling,o):t.replaceChild(o,this)}}}));function us(){return location.protocol==="file:"?wt(`${new
URL("search/search_index.js",Xr.base)}`).pipe(m(()=>__index),B(1)):Ne(new URL("search/search_index.json",Xr.base))}document.documentElement.classList.remove("no-js");document.documentElement.classList.add("js");var ot=Yo(),jt=nn(),Ot=cn(jt),Zr=on(),Oe=bn(),hr=$t("(min-width: 960px)"),Si=$t("(min-width: 1220px)"),Oi=pn(),Xr=ye(),Mi=document.forms.namedItem("search")?us():Ke,eo=new g;Bn({alert$:eo});var to=new g;G("navigation.instant")&&Zn({location$:jt,viewport$:Oe,progress$:to}).subscribe(ot);var Ti;((Ti=Xr.version)==null?void 0:Ti.provider)==="mike"&&ii({document$:ot});S(jt,Ot).pipe(Ge(125)).subscribe(()=>{Je("drawer",!1),Je("search",!1)});Zr.pipe(b(({mode:e})=>e==="global")).subscribe(e=>{switch(e.type){case"p":case",":let t=fe("link[rel=prev]");typeof t!="undefined"&&pt(t);break;case"n":case".":let r=fe("link[rel=next]");typeof r!="undefined"&&pt(r);break;case"Enter":let o=Re();o instanceof HTMLLabelElement&&o.click()}});xi({viewport$:Oe,document$:ot});yi({document$:ot,tablet$:hr});Ei({document$:ot});wi({viewport$:Oe,tablet$:hr});var rt=Nn(Se("header"),{viewport$:Oe}),Ft=ot.pipe(m(()=>Se("main")),v(e=>Qn(e,{viewport$:Oe,header$:rt})),B(1)),ds=S(...ae("consent").map(e=>xn(e,{target$:Ot})),...ae("dialog").map(e=>Dn(e,{alert$:eo})),...ae("header").map(e=>zn(e,{viewport$:Oe,header$:rt,main$:Ft})),...ae("palette").map(e=>Kn(e)),...ae("progress").map(e=>Yn(e,{progress$:to})),...ae("search").map(e=>li(e,{index$:Mi,keyboard$:Zr})),...ae("source").map(e=>hi(e))),hs=C(()=>S(...ae("announce").map(e=>gn(e)),...ae("content").map(e=>Un(e,{viewport$:Oe,target$:Ot,print$:Oi})),...ae("content").map(e=>G("search.highlight")?mi(e,{index$:Mi,location$:jt}):M),...ae("header-title").map(e=>qn(e,{viewport$:Oe,header$:rt})),...ae("sidebar").map(e=>e.getAttribute("data-md-type")==="navigation"?Nr(Si,()=>Jr(e,{viewport$:Oe,header$:rt,main$:Ft})):Nr(hr,()=>Jr(e,{viewport$:Oe,header$:rt,main$:Ft}))),...ae("tabs").map(e=>bi(e,{viewport$:Oe,header$:rt})),...ae("toc").map(e=>vi(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Ot})),...ae("top").map(e=>gi(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Ot})))),Li=ot.pipe(v(()=>hs),Pe(ds),B(1));Li.subscribe();window.document$=ot;window.location$=jt;window.target$=Ot;window.keyboard$=Zr;window.viewport$=Oe;window.tablet$=hr;window.screen$=Si;window.print$=Oi;window.alert$=eo;window.progress$=to;window.component$=Li;})(); +//# sourceMappingURL=bundle.ad660dcc.min.js.map + diff --git a/assets/javascripts/bundle.ad660dcc.min.js.map b/assets/javascripts/bundle.ad660dcc.min.js.map new file mode 100644 index 0000000..6d61170 --- /dev/null +++ b/assets/javascripts/bundle.ad660dcc.min.js.map @@ -0,0 +1,7 @@ +{ + "version": 3, + "sources": ["node_modules/focus-visible/dist/focus-visible.js", "node_modules/clipboard/dist/clipboard.js", "node_modules/escape-html/index.js", "src/templates/assets/javascripts/bundle.ts", "node_modules/rxjs/node_modules/tslib/tslib.es6.js", "node_modules/rxjs/src/internal/util/isFunction.ts", "node_modules/rxjs/src/internal/util/createErrorClass.ts", "node_modules/rxjs/src/internal/util/UnsubscriptionError.ts", "node_modules/rxjs/src/internal/util/arrRemove.ts", "node_modules/rxjs/src/internal/Subscription.ts", "node_modules/rxjs/src/internal/config.ts", "node_modules/rxjs/src/internal/scheduler/timeoutProvider.ts", "node_modules/rxjs/src/internal/util/reportUnhandledError.ts", "node_modules/rxjs/src/internal/util/noop.ts", "node_modules/rxjs/src/internal/NotificationFactories.ts", "node_modules/rxjs/src/internal/util/errorContext.ts", 
"node_modules/rxjs/src/internal/Subscriber.ts", "node_modules/rxjs/src/internal/symbol/observable.ts", "node_modules/rxjs/src/internal/util/identity.ts", "node_modules/rxjs/src/internal/util/pipe.ts", "node_modules/rxjs/src/internal/Observable.ts", "node_modules/rxjs/src/internal/util/lift.ts", "node_modules/rxjs/src/internal/operators/OperatorSubscriber.ts", "node_modules/rxjs/src/internal/scheduler/animationFrameProvider.ts", "node_modules/rxjs/src/internal/util/ObjectUnsubscribedError.ts", "node_modules/rxjs/src/internal/Subject.ts", "node_modules/rxjs/src/internal/BehaviorSubject.ts", "node_modules/rxjs/src/internal/scheduler/dateTimestampProvider.ts", "node_modules/rxjs/src/internal/ReplaySubject.ts", "node_modules/rxjs/src/internal/scheduler/Action.ts", "node_modules/rxjs/src/internal/scheduler/intervalProvider.ts", "node_modules/rxjs/src/internal/scheduler/AsyncAction.ts", "node_modules/rxjs/src/internal/Scheduler.ts", "node_modules/rxjs/src/internal/scheduler/AsyncScheduler.ts", "node_modules/rxjs/src/internal/scheduler/async.ts", "node_modules/rxjs/src/internal/scheduler/QueueAction.ts", "node_modules/rxjs/src/internal/scheduler/QueueScheduler.ts", "node_modules/rxjs/src/internal/scheduler/queue.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameAction.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameScheduler.ts", "node_modules/rxjs/src/internal/scheduler/animationFrame.ts", "node_modules/rxjs/src/internal/observable/empty.ts", "node_modules/rxjs/src/internal/util/isScheduler.ts", "node_modules/rxjs/src/internal/util/args.ts", "node_modules/rxjs/src/internal/util/isArrayLike.ts", "node_modules/rxjs/src/internal/util/isPromise.ts", "node_modules/rxjs/src/internal/util/isInteropObservable.ts", "node_modules/rxjs/src/internal/util/isAsyncIterable.ts", "node_modules/rxjs/src/internal/util/throwUnobservableError.ts", "node_modules/rxjs/src/internal/symbol/iterator.ts", "node_modules/rxjs/src/internal/util/isIterable.ts", "node_modules/rxjs/src/internal/util/isReadableStreamLike.ts", "node_modules/rxjs/src/internal/observable/innerFrom.ts", "node_modules/rxjs/src/internal/util/executeSchedule.ts", "node_modules/rxjs/src/internal/operators/observeOn.ts", "node_modules/rxjs/src/internal/operators/subscribeOn.ts", "node_modules/rxjs/src/internal/scheduled/scheduleObservable.ts", "node_modules/rxjs/src/internal/scheduled/schedulePromise.ts", "node_modules/rxjs/src/internal/scheduled/scheduleArray.ts", "node_modules/rxjs/src/internal/scheduled/scheduleIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleAsyncIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleReadableStreamLike.ts", "node_modules/rxjs/src/internal/scheduled/scheduled.ts", "node_modules/rxjs/src/internal/observable/from.ts", "node_modules/rxjs/src/internal/observable/of.ts", "node_modules/rxjs/src/internal/observable/throwError.ts", "node_modules/rxjs/src/internal/util/EmptyError.ts", "node_modules/rxjs/src/internal/util/isDate.ts", "node_modules/rxjs/src/internal/operators/map.ts", "node_modules/rxjs/src/internal/util/mapOneOrManyArgs.ts", "node_modules/rxjs/src/internal/util/argsArgArrayOrObject.ts", "node_modules/rxjs/src/internal/util/createObject.ts", "node_modules/rxjs/src/internal/observable/combineLatest.ts", "node_modules/rxjs/src/internal/operators/mergeInternals.ts", "node_modules/rxjs/src/internal/operators/mergeMap.ts", "node_modules/rxjs/src/internal/operators/mergeAll.ts", "node_modules/rxjs/src/internal/operators/concatAll.ts", 
"node_modules/rxjs/src/internal/observable/concat.ts", "node_modules/rxjs/src/internal/observable/defer.ts", "node_modules/rxjs/src/internal/observable/fromEvent.ts", "node_modules/rxjs/src/internal/observable/fromEventPattern.ts", "node_modules/rxjs/src/internal/observable/timer.ts", "node_modules/rxjs/src/internal/observable/merge.ts", "node_modules/rxjs/src/internal/observable/never.ts", "node_modules/rxjs/src/internal/util/argsOrArgArray.ts", "node_modules/rxjs/src/internal/operators/filter.ts", "node_modules/rxjs/src/internal/observable/zip.ts", "node_modules/rxjs/src/internal/operators/audit.ts", "node_modules/rxjs/src/internal/operators/auditTime.ts", "node_modules/rxjs/src/internal/operators/bufferCount.ts", "node_modules/rxjs/src/internal/operators/catchError.ts", "node_modules/rxjs/src/internal/operators/scanInternals.ts", "node_modules/rxjs/src/internal/operators/combineLatest.ts", "node_modules/rxjs/src/internal/operators/combineLatestWith.ts", "node_modules/rxjs/src/internal/operators/debounce.ts", "node_modules/rxjs/src/internal/operators/debounceTime.ts", "node_modules/rxjs/src/internal/operators/defaultIfEmpty.ts", "node_modules/rxjs/src/internal/operators/take.ts", "node_modules/rxjs/src/internal/operators/ignoreElements.ts", "node_modules/rxjs/src/internal/operators/mapTo.ts", "node_modules/rxjs/src/internal/operators/delayWhen.ts", "node_modules/rxjs/src/internal/operators/delay.ts", "node_modules/rxjs/src/internal/operators/distinctUntilChanged.ts", "node_modules/rxjs/src/internal/operators/distinctUntilKeyChanged.ts", "node_modules/rxjs/src/internal/operators/throwIfEmpty.ts", "node_modules/rxjs/src/internal/operators/endWith.ts", "node_modules/rxjs/src/internal/operators/finalize.ts", "node_modules/rxjs/src/internal/operators/first.ts", "node_modules/rxjs/src/internal/operators/takeLast.ts", "node_modules/rxjs/src/internal/operators/merge.ts", "node_modules/rxjs/src/internal/operators/mergeWith.ts", "node_modules/rxjs/src/internal/operators/repeat.ts", "node_modules/rxjs/src/internal/operators/scan.ts", "node_modules/rxjs/src/internal/operators/share.ts", "node_modules/rxjs/src/internal/operators/shareReplay.ts", "node_modules/rxjs/src/internal/operators/skip.ts", "node_modules/rxjs/src/internal/operators/skipUntil.ts", "node_modules/rxjs/src/internal/operators/startWith.ts", "node_modules/rxjs/src/internal/operators/switchMap.ts", "node_modules/rxjs/src/internal/operators/takeUntil.ts", "node_modules/rxjs/src/internal/operators/takeWhile.ts", "node_modules/rxjs/src/internal/operators/tap.ts", "node_modules/rxjs/src/internal/operators/throttle.ts", "node_modules/rxjs/src/internal/operators/throttleTime.ts", "node_modules/rxjs/src/internal/operators/withLatestFrom.ts", "node_modules/rxjs/src/internal/operators/zip.ts", "node_modules/rxjs/src/internal/operators/zipWith.ts", "src/templates/assets/javascripts/browser/document/index.ts", "src/templates/assets/javascripts/browser/element/_/index.ts", "src/templates/assets/javascripts/browser/element/focus/index.ts", "src/templates/assets/javascripts/browser/element/hover/index.ts", "src/templates/assets/javascripts/utilities/h/index.ts", "src/templates/assets/javascripts/utilities/round/index.ts", "src/templates/assets/javascripts/browser/script/index.ts", "src/templates/assets/javascripts/browser/element/size/_/index.ts", "src/templates/assets/javascripts/browser/element/size/content/index.ts", "src/templates/assets/javascripts/browser/element/offset/_/index.ts", 
"src/templates/assets/javascripts/browser/element/offset/content/index.ts", "src/templates/assets/javascripts/browser/element/visibility/index.ts", "src/templates/assets/javascripts/browser/toggle/index.ts", "src/templates/assets/javascripts/browser/keyboard/index.ts", "src/templates/assets/javascripts/browser/location/_/index.ts", "src/templates/assets/javascripts/browser/location/hash/index.ts", "src/templates/assets/javascripts/browser/media/index.ts", "src/templates/assets/javascripts/browser/request/index.ts", "src/templates/assets/javascripts/browser/viewport/offset/index.ts", "src/templates/assets/javascripts/browser/viewport/size/index.ts", "src/templates/assets/javascripts/browser/viewport/_/index.ts", "src/templates/assets/javascripts/browser/viewport/at/index.ts", "src/templates/assets/javascripts/browser/worker/index.ts", "src/templates/assets/javascripts/_/index.ts", "src/templates/assets/javascripts/components/_/index.ts", "src/templates/assets/javascripts/components/announce/index.ts", "src/templates/assets/javascripts/components/consent/index.ts", "src/templates/assets/javascripts/templates/tooltip/index.tsx", "src/templates/assets/javascripts/templates/annotation/index.tsx", "src/templates/assets/javascripts/templates/clipboard/index.tsx", "src/templates/assets/javascripts/templates/search/index.tsx", "src/templates/assets/javascripts/templates/source/index.tsx", "src/templates/assets/javascripts/templates/tabbed/index.tsx", "src/templates/assets/javascripts/templates/table/index.tsx", "src/templates/assets/javascripts/templates/version/index.tsx", "src/templates/assets/javascripts/components/tooltip2/index.ts", "src/templates/assets/javascripts/components/content/annotation/_/index.ts", "src/templates/assets/javascripts/components/content/annotation/list/index.ts", "src/templates/assets/javascripts/components/content/annotation/block/index.ts", "src/templates/assets/javascripts/components/content/code/_/index.ts", "src/templates/assets/javascripts/components/content/details/index.ts", "src/templates/assets/javascripts/components/content/mermaid/index.css", "src/templates/assets/javascripts/components/content/mermaid/index.ts", "src/templates/assets/javascripts/components/content/table/index.ts", "src/templates/assets/javascripts/components/content/tabs/index.ts", "src/templates/assets/javascripts/components/content/_/index.ts", "src/templates/assets/javascripts/components/dialog/index.ts", "src/templates/assets/javascripts/components/tooltip/index.ts", "src/templates/assets/javascripts/components/header/_/index.ts", "src/templates/assets/javascripts/components/header/title/index.ts", "src/templates/assets/javascripts/components/main/index.ts", "src/templates/assets/javascripts/components/palette/index.ts", "src/templates/assets/javascripts/components/progress/index.ts", "src/templates/assets/javascripts/integrations/clipboard/index.ts", "src/templates/assets/javascripts/integrations/sitemap/index.ts", "src/templates/assets/javascripts/integrations/instant/index.ts", "src/templates/assets/javascripts/integrations/search/highlighter/index.ts", "src/templates/assets/javascripts/integrations/search/worker/message/index.ts", "src/templates/assets/javascripts/integrations/search/worker/_/index.ts", "src/templates/assets/javascripts/integrations/version/index.ts", "src/templates/assets/javascripts/components/search/query/index.ts", "src/templates/assets/javascripts/components/search/result/index.ts", "src/templates/assets/javascripts/components/search/share/index.ts", 
"src/templates/assets/javascripts/components/search/suggest/index.ts", "src/templates/assets/javascripts/components/search/_/index.ts", "src/templates/assets/javascripts/components/search/highlight/index.ts", "src/templates/assets/javascripts/components/sidebar/index.ts", "src/templates/assets/javascripts/components/source/facts/github/index.ts", "src/templates/assets/javascripts/components/source/facts/gitlab/index.ts", "src/templates/assets/javascripts/components/source/facts/_/index.ts", "src/templates/assets/javascripts/components/source/_/index.ts", "src/templates/assets/javascripts/components/tabs/index.ts", "src/templates/assets/javascripts/components/toc/index.ts", "src/templates/assets/javascripts/components/top/index.ts", "src/templates/assets/javascripts/patches/ellipsis/index.ts", "src/templates/assets/javascripts/patches/indeterminate/index.ts", "src/templates/assets/javascripts/patches/scrollfix/index.ts", "src/templates/assets/javascripts/patches/scrolllock/index.ts", "src/templates/assets/javascripts/polyfills/index.ts"], + "sourcesContent": ["(function (global, factory) {\n typeof exports === 'object' && typeof module !== 'undefined' ? factory() :\n typeof define === 'function' && define.amd ? define(factory) :\n (factory());\n}(this, (function () { 'use strict';\n\n /**\n * Applies the :focus-visible polyfill at the given scope.\n * A scope in this case is either the top-level Document or a Shadow Root.\n *\n * @param {(Document|ShadowRoot)} scope\n * @see https://github.com/WICG/focus-visible\n */\n function applyFocusVisiblePolyfill(scope) {\n var hadKeyboardEvent = true;\n var hadFocusVisibleRecently = false;\n var hadFocusVisibleRecentlyTimeout = null;\n\n var inputTypesAllowlist = {\n text: true,\n search: true,\n url: true,\n tel: true,\n email: true,\n password: true,\n number: true,\n date: true,\n month: true,\n week: true,\n time: true,\n datetime: true,\n 'datetime-local': true\n };\n\n /**\n * Helper function for legacy browsers and iframes which sometimes focus\n * elements like document, body, and non-interactive SVG.\n * @param {Element} el\n */\n function isValidFocusTarget(el) {\n if (\n el &&\n el !== document &&\n el.nodeName !== 'HTML' &&\n el.nodeName !== 'BODY' &&\n 'classList' in el &&\n 'contains' in el.classList\n ) {\n return true;\n }\n return false;\n }\n\n /**\n * Computes whether the given element should automatically trigger the\n * `focus-visible` class being added, i.e. 
whether it should always match\n * `:focus-visible` when focused.\n * @param {Element} el\n * @return {boolean}\n */\n function focusTriggersKeyboardModality(el) {\n var type = el.type;\n var tagName = el.tagName;\n\n if (tagName === 'INPUT' && inputTypesAllowlist[type] && !el.readOnly) {\n return true;\n }\n\n if (tagName === 'TEXTAREA' && !el.readOnly) {\n return true;\n }\n\n if (el.isContentEditable) {\n return true;\n }\n\n return false;\n }\n\n /**\n * Add the `focus-visible` class to the given element if it was not added by\n * the author.\n * @param {Element} el\n */\n function addFocusVisibleClass(el) {\n if (el.classList.contains('focus-visible')) {\n return;\n }\n el.classList.add('focus-visible');\n el.setAttribute('data-focus-visible-added', '');\n }\n\n /**\n * Remove the `focus-visible` class from the given element if it was not\n * originally added by the author.\n * @param {Element} el\n */\n function removeFocusVisibleClass(el) {\n if (!el.hasAttribute('data-focus-visible-added')) {\n return;\n }\n el.classList.remove('focus-visible');\n el.removeAttribute('data-focus-visible-added');\n }\n\n /**\n * If the most recent user interaction was via the keyboard;\n * and the key press did not include a meta, alt/option, or control key;\n * then the modality is keyboard. Otherwise, the modality is not keyboard.\n * Apply `focus-visible` to any current active element and keep track\n * of our keyboard modality state with `hadKeyboardEvent`.\n * @param {KeyboardEvent} e\n */\n function onKeyDown(e) {\n if (e.metaKey || e.altKey || e.ctrlKey) {\n return;\n }\n\n if (isValidFocusTarget(scope.activeElement)) {\n addFocusVisibleClass(scope.activeElement);\n }\n\n hadKeyboardEvent = true;\n }\n\n /**\n * If at any point a user clicks with a pointing device, ensure that we change\n * the modality away from keyboard.\n * This avoids the situation where a user presses a key on an already focused\n * element, and then clicks on a different element, focusing it with a\n * pointing device, while we still think we're in keyboard modality.\n * @param {Event} e\n */\n function onPointerDown(e) {\n hadKeyboardEvent = false;\n }\n\n /**\n * On `focus`, add the `focus-visible` class to the target if:\n * - the target received focus as a result of keyboard navigation, or\n * - the event target is an element that will likely require interaction\n * via the keyboard (e.g. 
a text box)\n * @param {Event} e\n */\n function onFocus(e) {\n // Prevent IE from focusing the document or HTML element.\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (hadKeyboardEvent || focusTriggersKeyboardModality(e.target)) {\n addFocusVisibleClass(e.target);\n }\n }\n\n /**\n * On `blur`, remove the `focus-visible` class from the target.\n * @param {Event} e\n */\n function onBlur(e) {\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (\n e.target.classList.contains('focus-visible') ||\n e.target.hasAttribute('data-focus-visible-added')\n ) {\n // To detect a tab/window switch, we look for a blur event followed\n // rapidly by a visibility change.\n // If we don't see a visibility change within 100ms, it's probably a\n // regular focus change.\n hadFocusVisibleRecently = true;\n window.clearTimeout(hadFocusVisibleRecentlyTimeout);\n hadFocusVisibleRecentlyTimeout = window.setTimeout(function() {\n hadFocusVisibleRecently = false;\n }, 100);\n removeFocusVisibleClass(e.target);\n }\n }\n\n /**\n * If the user changes tabs, keep track of whether or not the previously\n * focused element had .focus-visible.\n * @param {Event} e\n */\n function onVisibilityChange(e) {\n if (document.visibilityState === 'hidden') {\n // If the tab becomes active again, the browser will handle calling focus\n // on the element (Safari actually calls it twice).\n // If this tab change caused a blur on an element with focus-visible,\n // re-apply the class when the user switches back to the tab.\n if (hadFocusVisibleRecently) {\n hadKeyboardEvent = true;\n }\n addInitialPointerMoveListeners();\n }\n }\n\n /**\n * Add a group of listeners to detect usage of any pointing devices.\n * These listeners will be added when the polyfill first loads, and anytime\n * the window is blurred, so that they are active when the window regains\n * focus.\n */\n function addInitialPointerMoveListeners() {\n document.addEventListener('mousemove', onInitialPointerMove);\n document.addEventListener('mousedown', onInitialPointerMove);\n document.addEventListener('mouseup', onInitialPointerMove);\n document.addEventListener('pointermove', onInitialPointerMove);\n document.addEventListener('pointerdown', onInitialPointerMove);\n document.addEventListener('pointerup', onInitialPointerMove);\n document.addEventListener('touchmove', onInitialPointerMove);\n document.addEventListener('touchstart', onInitialPointerMove);\n document.addEventListener('touchend', onInitialPointerMove);\n }\n\n function removeInitialPointerMoveListeners() {\n document.removeEventListener('mousemove', onInitialPointerMove);\n document.removeEventListener('mousedown', onInitialPointerMove);\n document.removeEventListener('mouseup', onInitialPointerMove);\n document.removeEventListener('pointermove', onInitialPointerMove);\n document.removeEventListener('pointerdown', onInitialPointerMove);\n document.removeEventListener('pointerup', onInitialPointerMove);\n document.removeEventListener('touchmove', onInitialPointerMove);\n document.removeEventListener('touchstart', onInitialPointerMove);\n document.removeEventListener('touchend', onInitialPointerMove);\n }\n\n /**\n * When the polfyill first loads, assume the user is in keyboard modality.\n * If any event is received from a pointing device (e.g. 
mouse, pointer,\n * touch), turn off keyboard modality.\n * This accounts for situations where focus enters the page from the URL bar.\n * @param {Event} e\n */\n function onInitialPointerMove(e) {\n // Work around a Safari quirk that fires a mousemove on whenever the\n // window blurs, even if you're tabbing out of the page. \u00AF\\_(\u30C4)_/\u00AF\n if (e.target.nodeName && e.target.nodeName.toLowerCase() === 'html') {\n return;\n }\n\n hadKeyboardEvent = false;\n removeInitialPointerMoveListeners();\n }\n\n // For some kinds of state, we are interested in changes at the global scope\n // only. For example, global pointer input, global key presses and global\n // visibility change should affect the state at every scope:\n document.addEventListener('keydown', onKeyDown, true);\n document.addEventListener('mousedown', onPointerDown, true);\n document.addEventListener('pointerdown', onPointerDown, true);\n document.addEventListener('touchstart', onPointerDown, true);\n document.addEventListener('visibilitychange', onVisibilityChange, true);\n\n addInitialPointerMoveListeners();\n\n // For focus and blur, we specifically care about state changes in the local\n // scope. This is because focus / blur events that originate from within a\n // shadow root are not re-dispatched from the host element if it was already\n // the active element in its own scope:\n scope.addEventListener('focus', onFocus, true);\n scope.addEventListener('blur', onBlur, true);\n\n // We detect that a node is a ShadowRoot by ensuring that it is a\n // DocumentFragment and also has a host property. This check covers native\n // implementation and polyfill implementation transparently. If we only cared\n // about the native implementation, we could just check if the scope was\n // an instance of a ShadowRoot.\n if (scope.nodeType === Node.DOCUMENT_FRAGMENT_NODE && scope.host) {\n // Since a ShadowRoot is a special kind of DocumentFragment, it does not\n // have a root element to add a class to. So, we add this attribute to the\n // host element instead:\n scope.host.setAttribute('data-js-focus-visible', '');\n } else if (scope.nodeType === Node.DOCUMENT_NODE) {\n document.documentElement.classList.add('js-focus-visible');\n document.documentElement.setAttribute('data-js-focus-visible', '');\n }\n }\n\n // It is important to wrap all references to global window and document in\n // these checks to support server-side rendering use cases\n // @see https://github.com/WICG/focus-visible/issues/199\n if (typeof window !== 'undefined' && typeof document !== 'undefined') {\n // Make the polyfill helper globally available. 
This can be used as a signal\n // to interested libraries that wish to coordinate with the polyfill for e.g.,\n // applying the polyfill to a shadow root:\n window.applyFocusVisiblePolyfill = applyFocusVisiblePolyfill;\n\n // Notify interested libraries of the polyfill's presence, in case the\n // polyfill was loaded lazily:\n var event;\n\n try {\n event = new CustomEvent('focus-visible-polyfill-ready');\n } catch (error) {\n // IE11 does not support using CustomEvent as a constructor directly:\n event = document.createEvent('CustomEvent');\n event.initCustomEvent('focus-visible-polyfill-ready', false, false, {});\n }\n\n window.dispatchEvent(event);\n }\n\n if (typeof document !== 'undefined') {\n // Apply the polyfill to the global document, so that no JavaScript\n // coordination is required to use the polyfill in the top-level document:\n applyFocusVisiblePolyfill(document);\n }\n\n})));\n", "/*!\n * clipboard.js v2.0.11\n * https://clipboardjs.com/\n *\n * Licensed MIT \u00A9 Zeno Rocha\n */\n(function webpackUniversalModuleDefinition(root, factory) {\n\tif(typeof exports === 'object' && typeof module === 'object')\n\t\tmodule.exports = factory();\n\telse if(typeof define === 'function' && define.amd)\n\t\tdefine([], factory);\n\telse if(typeof exports === 'object')\n\t\texports[\"ClipboardJS\"] = factory();\n\telse\n\t\troot[\"ClipboardJS\"] = factory();\n})(this, function() {\nreturn /******/ (function() { // webpackBootstrap\n/******/ \tvar __webpack_modules__ = ({\n\n/***/ 686:\n/***/ (function(__unused_webpack_module, __webpack_exports__, __webpack_require__) {\n\n\"use strict\";\n\n// EXPORTS\n__webpack_require__.d(__webpack_exports__, {\n \"default\": function() { return /* binding */ clipboard; }\n});\n\n// EXTERNAL MODULE: ./node_modules/tiny-emitter/index.js\nvar tiny_emitter = __webpack_require__(279);\nvar tiny_emitter_default = /*#__PURE__*/__webpack_require__.n(tiny_emitter);\n// EXTERNAL MODULE: ./node_modules/good-listener/src/listen.js\nvar listen = __webpack_require__(370);\nvar listen_default = /*#__PURE__*/__webpack_require__.n(listen);\n// EXTERNAL MODULE: ./node_modules/select/src/select.js\nvar src_select = __webpack_require__(817);\nvar select_default = /*#__PURE__*/__webpack_require__.n(src_select);\n;// CONCATENATED MODULE: ./src/common/command.js\n/**\n * Executes a given operation type.\n * @param {String} type\n * @return {Boolean}\n */\nfunction command(type) {\n try {\n return document.execCommand(type);\n } catch (err) {\n return false;\n }\n}\n;// CONCATENATED MODULE: ./src/actions/cut.js\n\n\n/**\n * Cut action wrapper.\n * @param {String|HTMLElement} target\n * @return {String}\n */\n\nvar ClipboardActionCut = function ClipboardActionCut(target) {\n var selectedText = select_default()(target);\n command('cut');\n return selectedText;\n};\n\n/* harmony default export */ var actions_cut = (ClipboardActionCut);\n;// CONCATENATED MODULE: ./src/common/create-fake-element.js\n/**\n * Creates a fake textarea element with a value.\n * @param {String} value\n * @return {HTMLElement}\n */\nfunction createFakeElement(value) {\n var isRTL = document.documentElement.getAttribute('dir') === 'rtl';\n var fakeElement = document.createElement('textarea'); // Prevent zooming on iOS\n\n fakeElement.style.fontSize = '12pt'; // Reset box model\n\n fakeElement.style.border = '0';\n fakeElement.style.padding = '0';\n fakeElement.style.margin = '0'; // Move element out of screen horizontally\n\n fakeElement.style.position = 'absolute';\n fakeElement.style[isRTL ? 
'right' : 'left'] = '-9999px'; // Move element to the same position vertically\n\n var yPosition = window.pageYOffset || document.documentElement.scrollTop;\n fakeElement.style.top = \"\".concat(yPosition, \"px\");\n fakeElement.setAttribute('readonly', '');\n fakeElement.value = value;\n return fakeElement;\n}\n;// CONCATENATED MODULE: ./src/actions/copy.js\n\n\n\n/**\n * Create fake copy action wrapper using a fake element.\n * @param {String} target\n * @param {Object} options\n * @return {String}\n */\n\nvar fakeCopyAction = function fakeCopyAction(value, options) {\n var fakeElement = createFakeElement(value);\n options.container.appendChild(fakeElement);\n var selectedText = select_default()(fakeElement);\n command('copy');\n fakeElement.remove();\n return selectedText;\n};\n/**\n * Copy action wrapper.\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @return {String}\n */\n\n\nvar ClipboardActionCopy = function ClipboardActionCopy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n var selectedText = '';\n\n if (typeof target === 'string') {\n selectedText = fakeCopyAction(target, options);\n } else if (target instanceof HTMLInputElement && !['text', 'search', 'url', 'tel', 'password'].includes(target === null || target === void 0 ? void 0 : target.type)) {\n // If input type doesn't support `setSelectionRange`. Simulate it. https://developer.mozilla.org/en-US/docs/Web/API/HTMLInputElement/setSelectionRange\n selectedText = fakeCopyAction(target.value, options);\n } else {\n selectedText = select_default()(target);\n command('copy');\n }\n\n return selectedText;\n};\n\n/* harmony default export */ var actions_copy = (ClipboardActionCopy);\n;// CONCATENATED MODULE: ./src/actions/default.js\nfunction _typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return _typeof(obj); }\n\n\n\n/**\n * Inner function which performs selection from either `text` or `target`\n * properties and then executes copy or cut operations.\n * @param {Object} options\n */\n\nvar ClipboardActionDefault = function ClipboardActionDefault() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n // Defines base properties passed from constructor.\n var _options$action = options.action,\n action = _options$action === void 0 ? 'copy' : _options$action,\n container = options.container,\n target = options.target,\n text = options.text; // Sets the `action` to be performed which can be either 'copy' or 'cut'.\n\n if (action !== 'copy' && action !== 'cut') {\n throw new Error('Invalid \"action\" value, use either \"copy\" or \"cut\"');\n } // Sets the `target` property using an element that will be have its content copied.\n\n\n if (target !== undefined) {\n if (target && _typeof(target) === 'object' && target.nodeType === 1) {\n if (action === 'copy' && target.hasAttribute('disabled')) {\n throw new Error('Invalid \"target\" attribute. Please use \"readonly\" instead of \"disabled\" attribute');\n }\n\n if (action === 'cut' && (target.hasAttribute('readonly') || target.hasAttribute('disabled'))) {\n throw new Error('Invalid \"target\" attribute. 
You can\\'t cut text from elements with \"readonly\" or \"disabled\" attributes');\n }\n } else {\n throw new Error('Invalid \"target\" value, use a valid Element');\n }\n } // Define selection strategy based on `text` property.\n\n\n if (text) {\n return actions_copy(text, {\n container: container\n });\n } // Defines which selection strategy based on `target` property.\n\n\n if (target) {\n return action === 'cut' ? actions_cut(target) : actions_copy(target, {\n container: container\n });\n }\n};\n\n/* harmony default export */ var actions_default = (ClipboardActionDefault);\n;// CONCATENATED MODULE: ./src/clipboard.js\nfunction clipboard_typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { clipboard_typeof = function _typeof(obj) { return typeof obj; }; } else { clipboard_typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return clipboard_typeof(obj); }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } }\n\nfunction _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function\"); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, writable: true, configurable: true } }); if (superClass) _setPrototypeOf(subClass, superClass); }\n\nfunction _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); }\n\nfunction _createSuper(Derived) { var hasNativeReflectConstruct = _isNativeReflectConstruct(); return function _createSuperInternal() { var Super = _getPrototypeOf(Derived), result; if (hasNativeReflectConstruct) { var NewTarget = _getPrototypeOf(this).constructor; result = Reflect.construct(Super, arguments, NewTarget); } else { result = Super.apply(this, arguments); } return _possibleConstructorReturn(this, result); }; }\n\nfunction _possibleConstructorReturn(self, call) { if (call && (clipboard_typeof(call) === \"object\" || typeof call === \"function\")) { return call; } return _assertThisInitialized(self); }\n\nfunction _assertThisInitialized(self) { if (self === void 0) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return self; }\n\nfunction _isNativeReflectConstruct() { if (typeof Reflect === \"undefined\" || !Reflect.construct) return false; if (Reflect.construct.sham) return false; if (typeof Proxy === \"function\") return true; try { Date.prototype.toString.call(Reflect.construct(Date, [], function () {})); return true; } catch (e) { return false; } }\n\nfunction _getPrototypeOf(o) { _getPrototypeOf = Object.setPrototypeOf ? 
Object.getPrototypeOf : function _getPrototypeOf(o) { return o.__proto__ || Object.getPrototypeOf(o); }; return _getPrototypeOf(o); }\n\n\n\n\n\n\n/**\n * Helper function to retrieve attribute value.\n * @param {String} suffix\n * @param {Element} element\n */\n\nfunction getAttributeValue(suffix, element) {\n var attribute = \"data-clipboard-\".concat(suffix);\n\n if (!element.hasAttribute(attribute)) {\n return;\n }\n\n return element.getAttribute(attribute);\n}\n/**\n * Base class which takes one or more elements, adds event listeners to them,\n * and instantiates a new `ClipboardAction` on each click.\n */\n\n\nvar Clipboard = /*#__PURE__*/function (_Emitter) {\n _inherits(Clipboard, _Emitter);\n\n var _super = _createSuper(Clipboard);\n\n /**\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n * @param {Object} options\n */\n function Clipboard(trigger, options) {\n var _this;\n\n _classCallCheck(this, Clipboard);\n\n _this = _super.call(this);\n\n _this.resolveOptions(options);\n\n _this.listenClick(trigger);\n\n return _this;\n }\n /**\n * Defines if attributes would be resolved using internal setter functions\n * or custom functions that were passed in the constructor.\n * @param {Object} options\n */\n\n\n _createClass(Clipboard, [{\n key: \"resolveOptions\",\n value: function resolveOptions() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n this.action = typeof options.action === 'function' ? options.action : this.defaultAction;\n this.target = typeof options.target === 'function' ? options.target : this.defaultTarget;\n this.text = typeof options.text === 'function' ? options.text : this.defaultText;\n this.container = clipboard_typeof(options.container) === 'object' ? options.container : document.body;\n }\n /**\n * Adds a click event listener to the passed trigger.\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n */\n\n }, {\n key: \"listenClick\",\n value: function listenClick(trigger) {\n var _this2 = this;\n\n this.listener = listen_default()(trigger, 'click', function (e) {\n return _this2.onClick(e);\n });\n }\n /**\n * Defines a new `ClipboardAction` on each click event.\n * @param {Event} e\n */\n\n }, {\n key: \"onClick\",\n value: function onClick(e) {\n var trigger = e.delegateTarget || e.currentTarget;\n var action = this.action(trigger) || 'copy';\n var text = actions_default({\n action: action,\n container: this.container,\n target: this.target(trigger),\n text: this.text(trigger)\n }); // Fires an event based on the copy operation result.\n\n this.emit(text ? 
'success' : 'error', {\n action: action,\n text: text,\n trigger: trigger,\n clearSelection: function clearSelection() {\n if (trigger) {\n trigger.focus();\n }\n\n window.getSelection().removeAllRanges();\n }\n });\n }\n /**\n * Default `action` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultAction\",\n value: function defaultAction(trigger) {\n return getAttributeValue('action', trigger);\n }\n /**\n * Default `target` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultTarget\",\n value: function defaultTarget(trigger) {\n var selector = getAttributeValue('target', trigger);\n\n if (selector) {\n return document.querySelector(selector);\n }\n }\n /**\n * Allow fire programmatically a copy action\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @returns Text copied.\n */\n\n }, {\n key: \"defaultText\",\n\n /**\n * Default `text` lookup function.\n * @param {Element} trigger\n */\n value: function defaultText(trigger) {\n return getAttributeValue('text', trigger);\n }\n /**\n * Destroy lifecycle.\n */\n\n }, {\n key: \"destroy\",\n value: function destroy() {\n this.listener.destroy();\n }\n }], [{\n key: \"copy\",\n value: function copy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n return actions_copy(target, options);\n }\n /**\n * Allow fire programmatically a cut action\n * @param {String|HTMLElement} target\n * @returns Text cutted.\n */\n\n }, {\n key: \"cut\",\n value: function cut(target) {\n return actions_cut(target);\n }\n /**\n * Returns the support of the given action, or all actions if no action is\n * given.\n * @param {String} [action]\n */\n\n }, {\n key: \"isSupported\",\n value: function isSupported() {\n var action = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : ['copy', 'cut'];\n var actions = typeof action === 'string' ? 
[action] : action;\n var support = !!document.queryCommandSupported;\n actions.forEach(function (action) {\n support = support && !!document.queryCommandSupported(action);\n });\n return support;\n }\n }]);\n\n return Clipboard;\n}((tiny_emitter_default()));\n\n/* harmony default export */ var clipboard = (Clipboard);\n\n/***/ }),\n\n/***/ 828:\n/***/ (function(module) {\n\nvar DOCUMENT_NODE_TYPE = 9;\n\n/**\n * A polyfill for Element.matches()\n */\nif (typeof Element !== 'undefined' && !Element.prototype.matches) {\n var proto = Element.prototype;\n\n proto.matches = proto.matchesSelector ||\n proto.mozMatchesSelector ||\n proto.msMatchesSelector ||\n proto.oMatchesSelector ||\n proto.webkitMatchesSelector;\n}\n\n/**\n * Finds the closest parent that matches a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @return {Function}\n */\nfunction closest (element, selector) {\n while (element && element.nodeType !== DOCUMENT_NODE_TYPE) {\n if (typeof element.matches === 'function' &&\n element.matches(selector)) {\n return element;\n }\n element = element.parentNode;\n }\n}\n\nmodule.exports = closest;\n\n\n/***/ }),\n\n/***/ 438:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar closest = __webpack_require__(828);\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction _delegate(element, selector, type, callback, useCapture) {\n var listenerFn = listener.apply(this, arguments);\n\n element.addEventListener(type, listenerFn, useCapture);\n\n return {\n destroy: function() {\n element.removeEventListener(type, listenerFn, useCapture);\n }\n }\n}\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element|String|Array} [elements]\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction delegate(elements, selector, type, callback, useCapture) {\n // Handle the regular Element usage\n if (typeof elements.addEventListener === 'function') {\n return _delegate.apply(null, arguments);\n }\n\n // Handle Element-less usage, it defaults to global delegation\n if (typeof type === 'function') {\n // Use `document` as the first parameter, then apply arguments\n // This is a short way to .unshift `arguments` without running into deoptimizations\n return _delegate.bind(null, document).apply(null, arguments);\n }\n\n // Handle Selector-based usage\n if (typeof elements === 'string') {\n elements = document.querySelectorAll(elements);\n }\n\n // Handle Array-like based usage\n return Array.prototype.map.call(elements, function (element) {\n return _delegate(element, selector, type, callback, useCapture);\n });\n}\n\n/**\n * Finds closest match and invokes callback.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Function}\n */\nfunction listener(element, selector, type, callback) {\n return function(e) {\n e.delegateTarget = closest(e.target, selector);\n\n if (e.delegateTarget) {\n callback.call(element, e);\n }\n }\n}\n\nmodule.exports = delegate;\n\n\n/***/ }),\n\n/***/ 879:\n/***/ (function(__unused_webpack_module, exports) {\n\n/**\n * Check if argument is a HTML element.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.node = function(value) {\n return value !== undefined\n && 
value instanceof HTMLElement\n && value.nodeType === 1;\n};\n\n/**\n * Check if argument is a list of HTML elements.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.nodeList = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return value !== undefined\n && (type === '[object NodeList]' || type === '[object HTMLCollection]')\n && ('length' in value)\n && (value.length === 0 || exports.node(value[0]));\n};\n\n/**\n * Check if argument is a string.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.string = function(value) {\n return typeof value === 'string'\n || value instanceof String;\n};\n\n/**\n * Check if argument is a function.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.fn = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return type === '[object Function]';\n};\n\n\n/***/ }),\n\n/***/ 370:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar is = __webpack_require__(879);\nvar delegate = __webpack_require__(438);\n\n/**\n * Validates all params and calls the right\n * listener function based on its target type.\n *\n * @param {String|HTMLElement|HTMLCollection|NodeList} target\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listen(target, type, callback) {\n if (!target && !type && !callback) {\n throw new Error('Missing required arguments');\n }\n\n if (!is.string(type)) {\n throw new TypeError('Second argument must be a String');\n }\n\n if (!is.fn(callback)) {\n throw new TypeError('Third argument must be a Function');\n }\n\n if (is.node(target)) {\n return listenNode(target, type, callback);\n }\n else if (is.nodeList(target)) {\n return listenNodeList(target, type, callback);\n }\n else if (is.string(target)) {\n return listenSelector(target, type, callback);\n }\n else {\n throw new TypeError('First argument must be a String, HTMLElement, HTMLCollection, or NodeList');\n }\n}\n\n/**\n * Adds an event listener to a HTML element\n * and returns a remove listener function.\n *\n * @param {HTMLElement} node\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNode(node, type, callback) {\n node.addEventListener(type, callback);\n\n return {\n destroy: function() {\n node.removeEventListener(type, callback);\n }\n }\n}\n\n/**\n * Add an event listener to a list of HTML elements\n * and returns a remove listener function.\n *\n * @param {NodeList|HTMLCollection} nodeList\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNodeList(nodeList, type, callback) {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.addEventListener(type, callback);\n });\n\n return {\n destroy: function() {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.removeEventListener(type, callback);\n });\n }\n }\n}\n\n/**\n * Add an event listener to a selector\n * and returns a remove listener function.\n *\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenSelector(selector, type, callback) {\n return delegate(document.body, selector, type, callback);\n}\n\nmodule.exports = listen;\n\n\n/***/ }),\n\n/***/ 817:\n/***/ (function(module) {\n\nfunction select(element) {\n var selectedText;\n\n if (element.nodeName === 'SELECT') {\n element.focus();\n\n selectedText = element.value;\n }\n else if (element.nodeName === 'INPUT' || element.nodeName 
=== 'TEXTAREA') {\n var isReadOnly = element.hasAttribute('readonly');\n\n if (!isReadOnly) {\n element.setAttribute('readonly', '');\n }\n\n element.select();\n element.setSelectionRange(0, element.value.length);\n\n if (!isReadOnly) {\n element.removeAttribute('readonly');\n }\n\n selectedText = element.value;\n }\n else {\n if (element.hasAttribute('contenteditable')) {\n element.focus();\n }\n\n var selection = window.getSelection();\n var range = document.createRange();\n\n range.selectNodeContents(element);\n selection.removeAllRanges();\n selection.addRange(range);\n\n selectedText = selection.toString();\n }\n\n return selectedText;\n}\n\nmodule.exports = select;\n\n\n/***/ }),\n\n/***/ 279:\n/***/ (function(module) {\n\nfunction E () {\n // Keep this empty so it's easier to inherit from\n // (via https://github.com/lipsmack from https://github.com/scottcorgan/tiny-emitter/issues/3)\n}\n\nE.prototype = {\n on: function (name, callback, ctx) {\n var e = this.e || (this.e = {});\n\n (e[name] || (e[name] = [])).push({\n fn: callback,\n ctx: ctx\n });\n\n return this;\n },\n\n once: function (name, callback, ctx) {\n var self = this;\n function listener () {\n self.off(name, listener);\n callback.apply(ctx, arguments);\n };\n\n listener._ = callback\n return this.on(name, listener, ctx);\n },\n\n emit: function (name) {\n var data = [].slice.call(arguments, 1);\n var evtArr = ((this.e || (this.e = {}))[name] || []).slice();\n var i = 0;\n var len = evtArr.length;\n\n for (i; i < len; i++) {\n evtArr[i].fn.apply(evtArr[i].ctx, data);\n }\n\n return this;\n },\n\n off: function (name, callback) {\n var e = this.e || (this.e = {});\n var evts = e[name];\n var liveEvents = [];\n\n if (evts && callback) {\n for (var i = 0, len = evts.length; i < len; i++) {\n if (evts[i].fn !== callback && evts[i].fn._ !== callback)\n liveEvents.push(evts[i]);\n }\n }\n\n // Remove event from queue to prevent memory leak\n // Suggested by https://github.com/lazd\n // Ref: https://github.com/scottcorgan/tiny-emitter/commit/c6ebfaa9bc973b33d110a84a307742b7cf94c953#commitcomment-5024910\n\n (liveEvents.length)\n ? 
e[name] = liveEvents\n : delete e[name];\n\n return this;\n }\n};\n\nmodule.exports = E;\nmodule.exports.TinyEmitter = E;\n\n\n/***/ })\n\n/******/ \t});\n/************************************************************************/\n/******/ \t// The module cache\n/******/ \tvar __webpack_module_cache__ = {};\n/******/ \t\n/******/ \t// The require function\n/******/ \tfunction __webpack_require__(moduleId) {\n/******/ \t\t// Check if module is in cache\n/******/ \t\tif(__webpack_module_cache__[moduleId]) {\n/******/ \t\t\treturn __webpack_module_cache__[moduleId].exports;\n/******/ \t\t}\n/******/ \t\t// Create a new module (and put it into the cache)\n/******/ \t\tvar module = __webpack_module_cache__[moduleId] = {\n/******/ \t\t\t// no module.id needed\n/******/ \t\t\t// no module.loaded needed\n/******/ \t\t\texports: {}\n/******/ \t\t};\n/******/ \t\n/******/ \t\t// Execute the module function\n/******/ \t\t__webpack_modules__[moduleId](module, module.exports, __webpack_require__);\n/******/ \t\n/******/ \t\t// Return the exports of the module\n/******/ \t\treturn module.exports;\n/******/ \t}\n/******/ \t\n/************************************************************************/\n/******/ \t/* webpack/runtime/compat get default export */\n/******/ \t!function() {\n/******/ \t\t// getDefaultExport function for compatibility with non-harmony modules\n/******/ \t\t__webpack_require__.n = function(module) {\n/******/ \t\t\tvar getter = module && module.__esModule ?\n/******/ \t\t\t\tfunction() { return module['default']; } :\n/******/ \t\t\t\tfunction() { return module; };\n/******/ \t\t\t__webpack_require__.d(getter, { a: getter });\n/******/ \t\t\treturn getter;\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/define property getters */\n/******/ \t!function() {\n/******/ \t\t// define getter functions for harmony exports\n/******/ \t\t__webpack_require__.d = function(exports, definition) {\n/******/ \t\t\tfor(var key in definition) {\n/******/ \t\t\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n/******/ \t\t\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n/******/ \t\t\t\t}\n/******/ \t\t\t}\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/hasOwnProperty shorthand */\n/******/ \t!function() {\n/******/ \t\t__webpack_require__.o = function(obj, prop) { return Object.prototype.hasOwnProperty.call(obj, prop); }\n/******/ \t}();\n/******/ \t\n/************************************************************************/\n/******/ \t// module exports must be returned from runtime so entry inlining is disabled\n/******/ \t// startup\n/******/ \t// Load entry module and return exports\n/******/ \treturn __webpack_require__(686);\n/******/ })()\n.default;\n});", "/*!\n * escape-html\n * Copyright(c) 2012-2013 TJ Holowaychuk\n * Copyright(c) 2015 Andreas Lubbe\n * Copyright(c) 2015 Tiancheng \"Timothy\" Gu\n * MIT Licensed\n */\n\n'use strict';\n\n/**\n * Module variables.\n * @private\n */\n\nvar matchHtmlRegExp = /[\"'&<>]/;\n\n/**\n * Module exports.\n * @public\n */\n\nmodule.exports = escapeHtml;\n\n/**\n * Escape special characters in the given string of html.\n *\n * @param {string} string The string to escape for inserting into HTML\n * @return {string}\n * @public\n */\n\nfunction escapeHtml(string) {\n var str = '' + string;\n var match = matchHtmlRegExp.exec(str);\n\n if (!match) {\n return str;\n }\n\n var escape;\n var html = '';\n var index = 0;\n 
var lastIndex = 0;\n\n for (index = match.index; index < str.length; index++) {\n switch (str.charCodeAt(index)) {\n case 34: // \"\n escape = '"';\n break;\n case 38: // &\n escape = '&';\n break;\n case 39: // '\n escape = ''';\n break;\n case 60: // <\n escape = '<';\n break;\n case 62: // >\n escape = '>';\n break;\n default:\n continue;\n }\n\n if (lastIndex !== index) {\n html += str.substring(lastIndex, index);\n }\n\n lastIndex = index + 1;\n html += escape;\n }\n\n return lastIndex !== index\n ? html + str.substring(lastIndex, index)\n : html;\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport \"focus-visible\"\n\nimport {\n EMPTY,\n NEVER,\n Observable,\n Subject,\n defer,\n delay,\n filter,\n map,\n merge,\n mergeWith,\n shareReplay,\n switchMap\n} from \"rxjs\"\n\nimport { configuration, feature } from \"./_\"\nimport {\n at,\n getActiveElement,\n getOptionalElement,\n requestJSON,\n setLocation,\n setToggle,\n watchDocument,\n watchKeyboard,\n watchLocation,\n watchLocationTarget,\n watchMedia,\n watchPrint,\n watchScript,\n watchViewport\n} from \"./browser\"\nimport {\n getComponentElement,\n getComponentElements,\n mountAnnounce,\n mountBackToTop,\n mountConsent,\n mountContent,\n mountDialog,\n mountHeader,\n mountHeaderTitle,\n mountPalette,\n mountProgress,\n mountSearch,\n mountSearchHiglight,\n mountSidebar,\n mountSource,\n mountTableOfContents,\n mountTabs,\n watchHeader,\n watchMain\n} from \"./components\"\nimport {\n SearchIndex,\n setupClipboardJS,\n setupInstantNavigation,\n setupVersionSelector\n} from \"./integrations\"\nimport {\n patchEllipsis,\n patchIndeterminate,\n patchScrollfix,\n patchScrolllock\n} from \"./patches\"\nimport \"./polyfills\"\n\n/* ----------------------------------------------------------------------------\n * Functions - @todo refactor\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch search index\n *\n * @returns Search index observable\n */\nfunction fetchSearchIndex(): Observable {\n if (location.protocol === \"file:\") {\n return watchScript(\n `${new URL(\"search/search_index.js\", config.base)}`\n )\n .pipe(\n // @ts-ignore - @todo fix typings\n map(() => __index),\n shareReplay(1)\n )\n } else {\n return requestJSON(\n new URL(\"search/search_index.json\", config.base)\n )\n }\n}\n\n/* ----------------------------------------------------------------------------\n * Application\n * 
------------------------------------------------------------------------- */\n\n/* Yay, JavaScript is available */\ndocument.documentElement.classList.remove(\"no-js\")\ndocument.documentElement.classList.add(\"js\")\n\n/* Set up navigation observables and subjects */\nconst document$ = watchDocument()\nconst location$ = watchLocation()\nconst target$ = watchLocationTarget(location$)\nconst keyboard$ = watchKeyboard()\n\n/* Set up media observables */\nconst viewport$ = watchViewport()\nconst tablet$ = watchMedia(\"(min-width: 960px)\")\nconst screen$ = watchMedia(\"(min-width: 1220px)\")\nconst print$ = watchPrint()\n\n/* Retrieve search index, if search is enabled */\nconst config = configuration()\nconst index$ = document.forms.namedItem(\"search\")\n ? fetchSearchIndex()\n : NEVER\n\n/* Set up Clipboard.js integration */\nconst alert$ = new Subject()\nsetupClipboardJS({ alert$ })\n\n/* Set up progress indicator */\nconst progress$ = new Subject()\n\n/* Set up instant navigation, if enabled */\nif (feature(\"navigation.instant\"))\n setupInstantNavigation({ location$, viewport$, progress$ })\n .subscribe(document$)\n\n/* Set up version selector */\nif (config.version?.provider === \"mike\")\n setupVersionSelector({ document$ })\n\n/* Always close drawer and search on navigation */\nmerge(location$, target$)\n .pipe(\n delay(125)\n )\n .subscribe(() => {\n setToggle(\"drawer\", false)\n setToggle(\"search\", false)\n })\n\n/* Set up global keyboard handlers */\nkeyboard$\n .pipe(\n filter(({ mode }) => mode === \"global\")\n )\n .subscribe(key => {\n switch (key.type) {\n\n /* Go to previous page */\n case \"p\":\n case \",\":\n const prev = getOptionalElement(\"link[rel=prev]\")\n if (typeof prev !== \"undefined\")\n setLocation(prev)\n break\n\n /* Go to next page */\n case \"n\":\n case \".\":\n const next = getOptionalElement(\"link[rel=next]\")\n if (typeof next !== \"undefined\")\n setLocation(next)\n break\n\n /* Expand navigation, see https://bit.ly/3ZjG5io */\n case \"Enter\":\n const active = getActiveElement()\n if (active instanceof HTMLLabelElement)\n active.click()\n }\n })\n\n/* Set up patches */\npatchEllipsis({ viewport$, document$ })\npatchIndeterminate({ document$, tablet$ })\npatchScrollfix({ document$ })\npatchScrolllock({ viewport$, tablet$ })\n\n/* Set up header and main area observable */\nconst header$ = watchHeader(getComponentElement(\"header\"), { viewport$ })\nconst main$ = document$\n .pipe(\n map(() => getComponentElement(\"main\")),\n switchMap(el => watchMain(el, { viewport$, header$ })),\n shareReplay(1)\n )\n\n/* Set up control component observables */\nconst control$ = merge(\n\n /* Consent */\n ...getComponentElements(\"consent\")\n .map(el => mountConsent(el, { target$ })),\n\n /* Dialog */\n ...getComponentElements(\"dialog\")\n .map(el => mountDialog(el, { alert$ })),\n\n /* Header */\n ...getComponentElements(\"header\")\n .map(el => mountHeader(el, { viewport$, header$, main$ })),\n\n /* Color palette */\n ...getComponentElements(\"palette\")\n .map(el => mountPalette(el)),\n\n /* Progress bar */\n ...getComponentElements(\"progress\")\n .map(el => mountProgress(el, { progress$ })),\n\n /* Search */\n ...getComponentElements(\"search\")\n .map(el => mountSearch(el, { index$, keyboard$ })),\n\n /* Repository information */\n ...getComponentElements(\"source\")\n .map(el => mountSource(el))\n)\n\n/* Set up content component observables */\nconst content$ = defer(() => merge(\n\n /* Announcement bar */\n ...getComponentElements(\"announce\")\n 
.map(el => mountAnnounce(el)),\n\n /* Content */\n ...getComponentElements(\"content\")\n .map(el => mountContent(el, { viewport$, target$, print$ })),\n\n /* Search highlighting */\n ...getComponentElements(\"content\")\n .map(el => feature(\"search.highlight\")\n ? mountSearchHiglight(el, { index$, location$ })\n : EMPTY\n ),\n\n /* Header title */\n ...getComponentElements(\"header-title\")\n .map(el => mountHeaderTitle(el, { viewport$, header$ })),\n\n /* Sidebar */\n ...getComponentElements(\"sidebar\")\n .map(el => el.getAttribute(\"data-md-type\") === \"navigation\"\n ? at(screen$, () => mountSidebar(el, { viewport$, header$, main$ }))\n : at(tablet$, () => mountSidebar(el, { viewport$, header$, main$ }))\n ),\n\n /* Navigation tabs */\n ...getComponentElements(\"tabs\")\n .map(el => mountTabs(el, { viewport$, header$ })),\n\n /* Table of contents */\n ...getComponentElements(\"toc\")\n .map(el => mountTableOfContents(el, {\n viewport$, header$, main$, target$\n })),\n\n /* Back-to-top button */\n ...getComponentElements(\"top\")\n .map(el => mountBackToTop(el, { viewport$, header$, main$, target$ }))\n))\n\n/* Set up component observables */\nconst component$ = document$\n .pipe(\n switchMap(() => content$),\n mergeWith(control$),\n shareReplay(1)\n )\n\n/* Subscribe to all components */\ncomponent$.subscribe()\n\n/* ----------------------------------------------------------------------------\n * Exports\n * ------------------------------------------------------------------------- */\n\nwindow.document$ = document$ /* Document observable */\nwindow.location$ = location$ /* Location subject */\nwindow.target$ = target$ /* Location target observable */\nwindow.keyboard$ = keyboard$ /* Keyboard observable */\nwindow.viewport$ = viewport$ /* Viewport observable */\nwindow.tablet$ = tablet$ /* Media tablet observable */\nwindow.screen$ = screen$ /* Media screen observable */\nwindow.print$ = print$ /* Media print observable */\nwindow.alert$ = alert$ /* Alert subject */\nwindow.progress$ = progress$ /* Progress indicator subject */\nwindow.component$ = component$ /* Component observable */\n", "/*! *****************************************************************************\r\nCopyright (c) Microsoft Corporation.\r\n\r\nPermission to use, copy, modify, and/or distribute this software for any\r\npurpose with or without fee is hereby granted.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\r\nREGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\r\nAND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\r\nINDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\r\nLOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\r\nOTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\r\nPERFORMANCE OF THIS SOFTWARE.\r\n***************************************************************************** */\r\n/* global Reflect, Promise */\r\n\r\nvar extendStatics = function(d, b) {\r\n extendStatics = Object.setPrototypeOf ||\r\n ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||\r\n function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };\r\n return extendStatics(d, b);\r\n};\r\n\r\nexport function __extends(d, b) {\r\n if (typeof b !== \"function\" && b !== null)\r\n throw new TypeError(\"Class extends value \" + String(b) + \" is not a constructor or null\");\r\n extendStatics(d, b);\r\n function __() { this.constructor = d; }\r\n d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());\r\n}\r\n\r\nexport var __assign = function() {\r\n __assign = Object.assign || function __assign(t) {\r\n for (var s, i = 1, n = arguments.length; i < n; i++) {\r\n s = arguments[i];\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];\r\n }\r\n return t;\r\n }\r\n return __assign.apply(this, arguments);\r\n}\r\n\r\nexport function __rest(s, e) {\r\n var t = {};\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)\r\n t[p] = s[p];\r\n if (s != null && typeof Object.getOwnPropertySymbols === \"function\")\r\n for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {\r\n if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))\r\n t[p[i]] = s[p[i]];\r\n }\r\n return t;\r\n}\r\n\r\nexport function __decorate(decorators, target, key, desc) {\r\n var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;\r\n if (typeof Reflect === \"object\" && typeof Reflect.decorate === \"function\") r = Reflect.decorate(decorators, target, key, desc);\r\n else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;\r\n return c > 3 && r && Object.defineProperty(target, key, r), r;\r\n}\r\n\r\nexport function __param(paramIndex, decorator) {\r\n return function (target, key) { decorator(target, key, paramIndex); }\r\n}\r\n\r\nexport function __metadata(metadataKey, metadataValue) {\r\n if (typeof Reflect === \"object\" && typeof Reflect.metadata === \"function\") return Reflect.metadata(metadataKey, metadataValue);\r\n}\r\n\r\nexport function __awaiter(thisArg, _arguments, P, generator) {\r\n function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }\r\n return new (P || (P = Promise))(function (resolve, reject) {\r\n function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }\r\n function rejected(value) { try { step(generator[\"throw\"](value)); } catch (e) { reject(e); } }\r\n function step(result) { result.done ? 
resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }\r\n step((generator = generator.apply(thisArg, _arguments || [])).next());\r\n });\r\n}\r\n\r\nexport function __generator(thisArg, body) {\r\n var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;\r\n return g = { next: verb(0), \"throw\": verb(1), \"return\": verb(2) }, typeof Symbol === \"function\" && (g[Symbol.iterator] = function() { return this; }), g;\r\n function verb(n) { return function (v) { return step([n, v]); }; }\r\n function step(op) {\r\n if (f) throw new TypeError(\"Generator is already executing.\");\r\n while (_) try {\r\n if (f = 1, y && (t = op[0] & 2 ? y[\"return\"] : op[0] ? y[\"throw\"] || ((t = y[\"return\"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;\r\n if (y = 0, t) op = [op[0] & 2, t.value];\r\n switch (op[0]) {\r\n case 0: case 1: t = op; break;\r\n case 4: _.label++; return { value: op[1], done: false };\r\n case 5: _.label++; y = op[1]; op = [0]; continue;\r\n case 7: op = _.ops.pop(); _.trys.pop(); continue;\r\n default:\r\n if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }\r\n if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }\r\n if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }\r\n if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }\r\n if (t[2]) _.ops.pop();\r\n _.trys.pop(); continue;\r\n }\r\n op = body.call(thisArg, _);\r\n } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }\r\n if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };\r\n }\r\n}\r\n\r\nexport var __createBinding = Object.create ? (function(o, m, k, k2) {\r\n if (k2 === undefined) k2 = k;\r\n Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });\r\n}) : (function(o, m, k, k2) {\r\n if (k2 === undefined) k2 = k;\r\n o[k2] = m[k];\r\n});\r\n\r\nexport function __exportStar(m, o) {\r\n for (var p in m) if (p !== \"default\" && !Object.prototype.hasOwnProperty.call(o, p)) __createBinding(o, m, p);\r\n}\r\n\r\nexport function __values(o) {\r\n var s = typeof Symbol === \"function\" && Symbol.iterator, m = s && o[s], i = 0;\r\n if (m) return m.call(o);\r\n if (o && typeof o.length === \"number\") return {\r\n next: function () {\r\n if (o && i >= o.length) o = void 0;\r\n return { value: o && o[i++], done: !o };\r\n }\r\n };\r\n throw new TypeError(s ? 
\"Object is not iterable.\" : \"Symbol.iterator is not defined.\");\r\n}\r\n\r\nexport function __read(o, n) {\r\n var m = typeof Symbol === \"function\" && o[Symbol.iterator];\r\n if (!m) return o;\r\n var i = m.call(o), r, ar = [], e;\r\n try {\r\n while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);\r\n }\r\n catch (error) { e = { error: error }; }\r\n finally {\r\n try {\r\n if (r && !r.done && (m = i[\"return\"])) m.call(i);\r\n }\r\n finally { if (e) throw e.error; }\r\n }\r\n return ar;\r\n}\r\n\r\n/** @deprecated */\r\nexport function __spread() {\r\n for (var ar = [], i = 0; i < arguments.length; i++)\r\n ar = ar.concat(__read(arguments[i]));\r\n return ar;\r\n}\r\n\r\n/** @deprecated */\r\nexport function __spreadArrays() {\r\n for (var s = 0, i = 0, il = arguments.length; i < il; i++) s += arguments[i].length;\r\n for (var r = Array(s), k = 0, i = 0; i < il; i++)\r\n for (var a = arguments[i], j = 0, jl = a.length; j < jl; j++, k++)\r\n r[k] = a[j];\r\n return r;\r\n}\r\n\r\nexport function __spreadArray(to, from, pack) {\r\n if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {\r\n if (ar || !(i in from)) {\r\n if (!ar) ar = Array.prototype.slice.call(from, 0, i);\r\n ar[i] = from[i];\r\n }\r\n }\r\n return to.concat(ar || Array.prototype.slice.call(from));\r\n}\r\n\r\nexport function __await(v) {\r\n return this instanceof __await ? (this.v = v, this) : new __await(v);\r\n}\r\n\r\nexport function __asyncGenerator(thisArg, _arguments, generator) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var g = generator.apply(thisArg, _arguments || []), i, q = [];\r\n return i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i;\r\n function verb(n) { if (g[n]) i[n] = function (v) { return new Promise(function (a, b) { q.push([n, v, a, b]) > 1 || resume(n, v); }); }; }\r\n function resume(n, v) { try { step(g[n](v)); } catch (e) { settle(q[0][3], e); } }\r\n function step(r) { r.value instanceof __await ? Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r); }\r\n function fulfill(value) { resume(\"next\", value); }\r\n function reject(value) { resume(\"throw\", value); }\r\n function settle(f, v) { if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]); }\r\n}\r\n\r\nexport function __asyncDelegator(o) {\r\n var i, p;\r\n return i = {}, verb(\"next\"), verb(\"throw\", function (e) { throw e; }), verb(\"return\"), i[Symbol.iterator] = function () { return this; }, i;\r\n function verb(n, f) { i[n] = o[n] ? function (v) { return (p = !p) ? { value: __await(o[n](v)), done: n === \"return\" } : f ? f(v) : v; } : f; }\r\n}\r\n\r\nexport function __asyncValues(o) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var m = o[Symbol.asyncIterator], i;\r\n return m ? m.call(o) : (o = typeof __values === \"function\" ? 
__values(o) : o[Symbol.iterator](), i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i);\r\n function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }\r\n function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }\r\n}\r\n\r\nexport function __makeTemplateObject(cooked, raw) {\r\n if (Object.defineProperty) { Object.defineProperty(cooked, \"raw\", { value: raw }); } else { cooked.raw = raw; }\r\n return cooked;\r\n};\r\n\r\nvar __setModuleDefault = Object.create ? (function(o, v) {\r\n Object.defineProperty(o, \"default\", { enumerable: true, value: v });\r\n}) : function(o, v) {\r\n o[\"default\"] = v;\r\n};\r\n\r\nexport function __importStar(mod) {\r\n if (mod && mod.__esModule) return mod;\r\n var result = {};\r\n if (mod != null) for (var k in mod) if (k !== \"default\" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);\r\n __setModuleDefault(result, mod);\r\n return result;\r\n}\r\n\r\nexport function __importDefault(mod) {\r\n return (mod && mod.__esModule) ? mod : { default: mod };\r\n}\r\n\r\nexport function __classPrivateFieldGet(receiver, state, kind, f) {\r\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a getter\");\r\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot read private member from an object whose class did not declare it\");\r\n return kind === \"m\" ? f : kind === \"a\" ? f.call(receiver) : f ? f.value : state.get(receiver);\r\n}\r\n\r\nexport function __classPrivateFieldSet(receiver, state, value, kind, f) {\r\n if (kind === \"m\") throw new TypeError(\"Private method is not writable\");\r\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a setter\");\r\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot write private member to an object whose class did not declare it\");\r\n return (kind === \"a\" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value;\r\n}\r\n", "/**\n * Returns true if the object is a function.\n * @param value The value to check\n */\nexport function isFunction(value: any): value is (...args: any[]) => any {\n return typeof value === 'function';\n}\n", "/**\n * Used to create Error subclasses until the community moves away from ES5.\n *\n * This is because compiling from TypeScript down to ES5 has issues with subclassing Errors\n * as well as other built-in types: https://github.com/Microsoft/TypeScript/issues/12123\n *\n * @param createImpl A factory function to create the actual constructor implementation. 
The returned\n * function should be a named function that calls `_super` internally.\n */\nexport function createErrorClass(createImpl: (_super: any) => any): T {\n const _super = (instance: any) => {\n Error.call(instance);\n instance.stack = new Error().stack;\n };\n\n const ctorFunc = createImpl(_super);\n ctorFunc.prototype = Object.create(Error.prototype);\n ctorFunc.prototype.constructor = ctorFunc;\n return ctorFunc;\n}\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface UnsubscriptionError extends Error {\n readonly errors: any[];\n}\n\nexport interface UnsubscriptionErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (errors: any[]): UnsubscriptionError;\n}\n\n/**\n * An error thrown when one or more errors have occurred during the\n * `unsubscribe` of a {@link Subscription}.\n */\nexport const UnsubscriptionError: UnsubscriptionErrorCtor = createErrorClass(\n (_super) =>\n function UnsubscriptionErrorImpl(this: any, errors: (Error | string)[]) {\n _super(this);\n this.message = errors\n ? `${errors.length} errors occurred during unsubscription:\n${errors.map((err, i) => `${i + 1}) ${err.toString()}`).join('\\n ')}`\n : '';\n this.name = 'UnsubscriptionError';\n this.errors = errors;\n }\n);\n", "/**\n * Removes an item from an array, mutating it.\n * @param arr The array to remove the item from\n * @param item The item to remove\n */\nexport function arrRemove(arr: T[] | undefined | null, item: T) {\n if (arr) {\n const index = arr.indexOf(item);\n 0 <= index && arr.splice(index, 1);\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { UnsubscriptionError } from './util/UnsubscriptionError';\nimport { SubscriptionLike, TeardownLogic, Unsubscribable } from './types';\nimport { arrRemove } from './util/arrRemove';\n\n/**\n * Represents a disposable resource, such as the execution of an Observable. A\n * Subscription has one important method, `unsubscribe`, that takes no argument\n * and just disposes the resource held by the subscription.\n *\n * Additionally, subscriptions may be grouped together through the `add()`\n * method, which will attach a child Subscription to the current Subscription.\n * When a Subscription is unsubscribed, all its children (and its grandchildren)\n * will be unsubscribed as well.\n *\n * @class Subscription\n */\nexport class Subscription implements SubscriptionLike {\n /** @nocollapse */\n public static EMPTY = (() => {\n const empty = new Subscription();\n empty.closed = true;\n return empty;\n })();\n\n /**\n * A flag to indicate whether this Subscription has already been unsubscribed.\n */\n public closed = false;\n\n private _parentage: Subscription[] | Subscription | null = null;\n\n /**\n * The list of registered finalizers to execute upon unsubscription. Adding and removing from this\n * list occurs in the {@link #add} and {@link #remove} methods.\n */\n private _finalizers: Exclude[] | null = null;\n\n /**\n * @param initialTeardown A function executed first as part of the finalization\n * process that is kicked off when {@link #unsubscribe} is called.\n */\n constructor(private initialTeardown?: () => void) {}\n\n /**\n * Disposes the resources held by the subscription. 
May, for instance, cancel\n * an ongoing Observable execution or cancel any other type of work that\n * started when the Subscription was created.\n * @return {void}\n */\n unsubscribe(): void {\n let errors: any[] | undefined;\n\n if (!this.closed) {\n this.closed = true;\n\n // Remove this from it's parents.\n const { _parentage } = this;\n if (_parentage) {\n this._parentage = null;\n if (Array.isArray(_parentage)) {\n for (const parent of _parentage) {\n parent.remove(this);\n }\n } else {\n _parentage.remove(this);\n }\n }\n\n const { initialTeardown: initialFinalizer } = this;\n if (isFunction(initialFinalizer)) {\n try {\n initialFinalizer();\n } catch (e) {\n errors = e instanceof UnsubscriptionError ? e.errors : [e];\n }\n }\n\n const { _finalizers } = this;\n if (_finalizers) {\n this._finalizers = null;\n for (const finalizer of _finalizers) {\n try {\n execFinalizer(finalizer);\n } catch (err) {\n errors = errors ?? [];\n if (err instanceof UnsubscriptionError) {\n errors = [...errors, ...err.errors];\n } else {\n errors.push(err);\n }\n }\n }\n }\n\n if (errors) {\n throw new UnsubscriptionError(errors);\n }\n }\n }\n\n /**\n * Adds a finalizer to this subscription, so that finalization will be unsubscribed/called\n * when this subscription is unsubscribed. If this subscription is already {@link #closed},\n * because it has already been unsubscribed, then whatever finalizer is passed to it\n * will automatically be executed (unless the finalizer itself is also a closed subscription).\n *\n * Closed Subscriptions cannot be added as finalizers to any subscription. Adding a closed\n * subscription to a any subscription will result in no operation. (A noop).\n *\n * Adding a subscription to itself, or adding `null` or `undefined` will not perform any\n * operation at all. (A noop).\n *\n * `Subscription` instances that are added to this instance will automatically remove themselves\n * if they are unsubscribed. Functions and {@link Unsubscribable} objects that you wish to remove\n * will need to be removed manually with {@link #remove}\n *\n * @param teardown The finalization logic to add to this subscription.\n */\n add(teardown: TeardownLogic): void {\n // Only add the finalizer if it's not undefined\n // and don't add a subscription to itself.\n if (teardown && teardown !== this) {\n if (this.closed) {\n // If this subscription is already closed,\n // execute whatever finalizer is handed to it automatically.\n execFinalizer(teardown);\n } else {\n if (teardown instanceof Subscription) {\n // We don't add closed subscriptions, and we don't add the same subscription\n // twice. Subscription unsubscribe is idempotent.\n if (teardown.closed || teardown._hasParent(this)) {\n return;\n }\n teardown._addParent(this);\n }\n (this._finalizers = this._finalizers ?? 
[]).push(teardown);\n }\n }\n }\n\n /**\n * Checks to see if a this subscription already has a particular parent.\n * This will signal that this subscription has already been added to the parent in question.\n * @param parent the parent to check for\n */\n private _hasParent(parent: Subscription) {\n const { _parentage } = this;\n return _parentage === parent || (Array.isArray(_parentage) && _parentage.includes(parent));\n }\n\n /**\n * Adds a parent to this subscription so it can be removed from the parent if it\n * unsubscribes on it's own.\n *\n * NOTE: THIS ASSUMES THAT {@link _hasParent} HAS ALREADY BEEN CHECKED.\n * @param parent The parent subscription to add\n */\n private _addParent(parent: Subscription) {\n const { _parentage } = this;\n this._parentage = Array.isArray(_parentage) ? (_parentage.push(parent), _parentage) : _parentage ? [_parentage, parent] : parent;\n }\n\n /**\n * Called on a child when it is removed via {@link #remove}.\n * @param parent The parent to remove\n */\n private _removeParent(parent: Subscription) {\n const { _parentage } = this;\n if (_parentage === parent) {\n this._parentage = null;\n } else if (Array.isArray(_parentage)) {\n arrRemove(_parentage, parent);\n }\n }\n\n /**\n * Removes a finalizer from this subscription that was previously added with the {@link #add} method.\n *\n * Note that `Subscription` instances, when unsubscribed, will automatically remove themselves\n * from every other `Subscription` they have been added to. This means that using the `remove` method\n * is not a common thing and should be used thoughtfully.\n *\n * If you add the same finalizer instance of a function or an unsubscribable object to a `Subscription` instance\n * more than once, you will need to call `remove` the same number of times to remove all instances.\n *\n * All finalizer instances are removed to free up memory upon unsubscription.\n *\n * @param teardown The finalizer to remove from this subscription\n */\n remove(teardown: Exclude): void {\n const { _finalizers } = this;\n _finalizers && arrRemove(_finalizers, teardown);\n\n if (teardown instanceof Subscription) {\n teardown._removeParent(this);\n }\n }\n}\n\nexport const EMPTY_SUBSCRIPTION = Subscription.EMPTY;\n\nexport function isSubscription(value: any): value is Subscription {\n return (\n value instanceof Subscription ||\n (value && 'closed' in value && isFunction(value.remove) && isFunction(value.add) && isFunction(value.unsubscribe))\n );\n}\n\nfunction execFinalizer(finalizer: Unsubscribable | (() => void)) {\n if (isFunction(finalizer)) {\n finalizer();\n } else {\n finalizer.unsubscribe();\n }\n}\n", "import { Subscriber } from './Subscriber';\nimport { ObservableNotification } from './types';\n\n/**\n * The {@link GlobalConfig} object for RxJS. It is used to configure things\n * like how to react on unhandled errors.\n */\nexport const config: GlobalConfig = {\n onUnhandledError: null,\n onStoppedNotification: null,\n Promise: undefined,\n useDeprecatedSynchronousErrorHandling: false,\n useDeprecatedNextContext: false,\n};\n\n/**\n * The global configuration object for RxJS, used to configure things\n * like how to react on unhandled errors. Accessible via {@link config}\n * object.\n */\nexport interface GlobalConfig {\n /**\n * A registration point for unhandled errors from RxJS. These are errors that\n * cannot were not handled by consuming code in the usual subscription path. 
For\n * example, if you have this configured, and you subscribe to an observable without\n * providing an error handler, errors from that subscription will end up here. This\n * will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onUnhandledError: ((err: any) => void) | null;\n\n /**\n * A registration point for notifications that cannot be sent to subscribers because they\n * have completed, errored or have been explicitly unsubscribed. By default, next, complete\n * and error notifications sent to stopped subscribers are noops. However, sometimes callers\n * might want a different behavior. For example, with sources that attempt to report errors\n * to stopped subscribers, a caller can configure RxJS to throw an unhandled error instead.\n * This will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onStoppedNotification: ((notification: ObservableNotification, subscriber: Subscriber) => void) | null;\n\n /**\n * The promise constructor used by default for {@link Observable#toPromise toPromise} and {@link Observable#forEach forEach}\n * methods.\n *\n * @deprecated As of version 8, RxJS will no longer support this sort of injection of a\n * Promise constructor. If you need a Promise implementation other than native promises,\n * please polyfill/patch Promise as you see appropriate. Will be removed in v8.\n */\n Promise?: PromiseConstructorLike;\n\n /**\n * If true, turns on synchronous error rethrowing, which is a deprecated behavior\n * in v6 and higher. This behavior enables bad patterns like wrapping a subscribe\n * call in a try/catch block. It also enables producer interference, a nasty bug\n * where a multicast can be broken for all observers by a downstream consumer with\n * an unhandled error. DO NOT USE THIS FLAG UNLESS IT'S NEEDED TO BUY TIME\n * FOR MIGRATION REASONS.\n *\n * @deprecated As of version 8, RxJS will no longer support synchronous throwing\n * of unhandled errors. All errors will be thrown on a separate call stack to prevent bad\n * behaviors described above. Will be removed in v8.\n */\n useDeprecatedSynchronousErrorHandling: boolean;\n\n /**\n * If true, enables an as-of-yet undocumented feature from v5: The ability to access\n * `unsubscribe()` via `this` context in `next` functions created in observers passed\n * to `subscribe`.\n *\n * This is being removed because the performance was severely problematic, and it could also cause\n * issues when types other than POJOs are passed to subscribe as subscribers, as they will likely have\n * their `this` context overwritten.\n *\n * @deprecated As of version 8, RxJS will no longer support altering the\n * context of next functions provided as part of an observer to Subscribe. Instead,\n * you will have access to a subscription or a signal or token that will allow you to do things like\n * unsubscribe and test closed status. 
Will be removed in v8.\n */\n useDeprecatedNextContext: boolean;\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetTimeoutFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearTimeoutFunction = (handle: TimerHandle) => void;\n\ninterface TimeoutProvider {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n delegate:\n | {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n }\n | undefined;\n}\n\nexport const timeoutProvider: TimeoutProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setTimeout(handler: () => void, timeout?: number, ...args) {\n const { delegate } = timeoutProvider;\n if (delegate?.setTimeout) {\n return delegate.setTimeout(handler, timeout, ...args);\n }\n return setTimeout(handler, timeout, ...args);\n },\n clearTimeout(handle) {\n const { delegate } = timeoutProvider;\n return (delegate?.clearTimeout || clearTimeout)(handle as any);\n },\n delegate: undefined,\n};\n", "import { config } from '../config';\nimport { timeoutProvider } from '../scheduler/timeoutProvider';\n\n/**\n * Handles an error on another job either with the user-configured {@link onUnhandledError},\n * or by throwing it on that new job so it can be picked up by `window.onerror`, `process.on('error')`, etc.\n *\n * This should be called whenever there is an error that is out-of-band with the subscription\n * or when an error hits a terminal boundary of the subscription and no error handler was provided.\n *\n * @param err the error to report\n */\nexport function reportUnhandledError(err: any) {\n timeoutProvider.setTimeout(() => {\n const { onUnhandledError } = config;\n if (onUnhandledError) {\n // Execute the user-configured error handler.\n onUnhandledError(err);\n } else {\n // Throw so it is picked up by the runtime's uncaught error mechanism.\n throw err;\n }\n });\n}\n", "/* tslint:disable:no-empty */\nexport function noop() { }\n", "import { CompleteNotification, NextNotification, ErrorNotification } from './types';\n\n/**\n * A completion object optimized for memory use and created to be the\n * same \"shape\" as other notifications in v8.\n * @internal\n */\nexport const COMPLETE_NOTIFICATION = (() => createNotification('C', undefined, undefined) as CompleteNotification)();\n\n/**\n * Internal use only. Creates an optimized error notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function errorNotification(error: any): ErrorNotification {\n return createNotification('E', undefined, error) as any;\n}\n\n/**\n * Internal use only. Creates an optimized next notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function nextNotification(value: T) {\n return createNotification('N', value, undefined) as NextNotification;\n}\n\n/**\n * Ensures that all notifications created internally have the same \"shape\" in v8.\n *\n * TODO: This is only exported to support a crazy legacy test in `groupBy`.\n * @internal\n */\nexport function createNotification(kind: 'N' | 'E' | 'C', value: any, error: any) {\n return {\n kind,\n value,\n error,\n };\n}\n", "import { config } from '../config';\n\nlet context: { errorThrown: boolean; error: any } | null = null;\n\n/**\n * Handles dealing with errors for super-gross mode. 
Creates a context, in which\n * any synchronously thrown errors will be passed to {@link captureError}. Which\n * will record the error such that it will be rethrown after the call back is complete.\n * TODO: Remove in v8\n * @param cb An immediately executed function.\n */\nexport function errorContext(cb: () => void) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n const isRoot = !context;\n if (isRoot) {\n context = { errorThrown: false, error: null };\n }\n cb();\n if (isRoot) {\n const { errorThrown, error } = context!;\n context = null;\n if (errorThrown) {\n throw error;\n }\n }\n } else {\n // This is the general non-deprecated path for everyone that\n // isn't crazy enough to use super-gross mode (useDeprecatedSynchronousErrorHandling)\n cb();\n }\n}\n\n/**\n * Captures errors only in super-gross mode.\n * @param err the error to capture\n */\nexport function captureError(err: any) {\n if (config.useDeprecatedSynchronousErrorHandling && context) {\n context.errorThrown = true;\n context.error = err;\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { Observer, ObservableNotification } from './types';\nimport { isSubscription, Subscription } from './Subscription';\nimport { config } from './config';\nimport { reportUnhandledError } from './util/reportUnhandledError';\nimport { noop } from './util/noop';\nimport { nextNotification, errorNotification, COMPLETE_NOTIFICATION } from './NotificationFactories';\nimport { timeoutProvider } from './scheduler/timeoutProvider';\nimport { captureError } from './util/errorContext';\n\n/**\n * Implements the {@link Observer} interface and extends the\n * {@link Subscription} class. While the {@link Observer} is the public API for\n * consuming the values of an {@link Observable}, all Observers get converted to\n * a Subscriber, in order to provide Subscription-like capabilities such as\n * `unsubscribe`. Subscriber is a common type in RxJS, and crucial for\n * implementing operators, but it is rarely used as a public API.\n *\n * @class Subscriber\n */\nexport class Subscriber extends Subscription implements Observer {\n /**\n * A static factory for a Subscriber, given a (potentially partial) definition\n * of an Observer.\n * @param next The `next` callback of an Observer.\n * @param error The `error` callback of an\n * Observer.\n * @param complete The `complete` callback of an\n * Observer.\n * @return A Subscriber wrapping the (partially defined)\n * Observer represented by the given arguments.\n * @nocollapse\n * @deprecated Do not use. Will be removed in v8. There is no replacement for this\n * method, and there is no reason to be creating instances of `Subscriber` directly.\n * If you have a specific use case, please file an issue.\n */\n static create(next?: (x?: T) => void, error?: (e?: any) => void, complete?: () => void): Subscriber {\n return new SafeSubscriber(next, error, complete);\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected isStopped: boolean = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected destination: Subscriber | Observer; // this `any` is the escape hatch to erase extra type param (e.g. R)\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * There is no reason to directly create an instance of Subscriber. 
This type is exported for typings reasons.\n */\n constructor(destination?: Subscriber | Observer) {\n super();\n if (destination) {\n this.destination = destination;\n // Automatically chain subscriptions together here.\n // if destination is a Subscription, then it is a Subscriber.\n if (isSubscription(destination)) {\n destination.add(this);\n }\n } else {\n this.destination = EMPTY_OBSERVER;\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `next` from\n * the Observable, with a value. The Observable may call this method 0 or more\n * times.\n * @param {T} [value] The `next` value.\n * @return {void}\n */\n next(value?: T): void {\n if (this.isStopped) {\n handleStoppedNotification(nextNotification(value), this);\n } else {\n this._next(value!);\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `error` from\n * the Observable, with an attached `Error`. Notifies the Observer that\n * the Observable has experienced an error condition.\n * @param {any} [err] The `error` exception.\n * @return {void}\n */\n error(err?: any): void {\n if (this.isStopped) {\n handleStoppedNotification(errorNotification(err), this);\n } else {\n this.isStopped = true;\n this._error(err);\n }\n }\n\n /**\n * The {@link Observer} callback to receive a valueless notification of type\n * `complete` from the Observable. Notifies the Observer that the Observable\n * has finished sending push-based notifications.\n * @return {void}\n */\n complete(): void {\n if (this.isStopped) {\n handleStoppedNotification(COMPLETE_NOTIFICATION, this);\n } else {\n this.isStopped = true;\n this._complete();\n }\n }\n\n unsubscribe(): void {\n if (!this.closed) {\n this.isStopped = true;\n super.unsubscribe();\n this.destination = null!;\n }\n }\n\n protected _next(value: T): void {\n this.destination.next(value);\n }\n\n protected _error(err: any): void {\n try {\n this.destination.error(err);\n } finally {\n this.unsubscribe();\n }\n }\n\n protected _complete(): void {\n try {\n this.destination.complete();\n } finally {\n this.unsubscribe();\n }\n }\n}\n\n/**\n * This bind is captured here because we want to be able to have\n * compatibility with monoid libraries that tend to use a method named\n * `bind`. 
In particular, a library called Monio requires this.\n */\nconst _bind = Function.prototype.bind;\n\nfunction bind any>(fn: Fn, thisArg: any): Fn {\n return _bind.call(fn, thisArg);\n}\n\n/**\n * Internal optimization only, DO NOT EXPOSE.\n * @internal\n */\nclass ConsumerObserver implements Observer {\n constructor(private partialObserver: Partial>) {}\n\n next(value: T): void {\n const { partialObserver } = this;\n if (partialObserver.next) {\n try {\n partialObserver.next(value);\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n\n error(err: any): void {\n const { partialObserver } = this;\n if (partialObserver.error) {\n try {\n partialObserver.error(err);\n } catch (error) {\n handleUnhandledError(error);\n }\n } else {\n handleUnhandledError(err);\n }\n }\n\n complete(): void {\n const { partialObserver } = this;\n if (partialObserver.complete) {\n try {\n partialObserver.complete();\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n}\n\nexport class SafeSubscriber extends Subscriber {\n constructor(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((e?: any) => void) | null,\n complete?: (() => void) | null\n ) {\n super();\n\n let partialObserver: Partial>;\n if (isFunction(observerOrNext) || !observerOrNext) {\n // The first argument is a function, not an observer. The next\n // two arguments *could* be observers, or they could be empty.\n partialObserver = {\n next: (observerOrNext ?? undefined) as (((value: T) => void) | undefined),\n error: error ?? undefined,\n complete: complete ?? undefined,\n };\n } else {\n // The first argument is a partial observer.\n let context: any;\n if (this && config.useDeprecatedNextContext) {\n // This is a deprecated path that made `this.unsubscribe()` available in\n // next handler functions passed to subscribe. This only exists behind a flag\n // now, as it is *very* slow.\n context = Object.create(observerOrNext);\n context.unsubscribe = () => this.unsubscribe();\n partialObserver = {\n next: observerOrNext.next && bind(observerOrNext.next, context),\n error: observerOrNext.error && bind(observerOrNext.error, context),\n complete: observerOrNext.complete && bind(observerOrNext.complete, context),\n };\n } else {\n // The \"normal\" path. 
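// A minimal sketch of ConsumerObserver's try/catch above, assuming the
// public rxjs v7 entry point: an exception thrown inside a `next`
// handler goes to handleUnhandledError and is reported on a new call
// stack; it is not delivered to the `error` callback and does not stop
// the stream.
import { of } from 'rxjs';

of(1, 2, 3).subscribe({
  next: (v) => {
    if (v === 2) throw new Error('handler failed'); // -> handleUnhandledError
    console.log(v); // logs 1, then 3
  },
  error: (e) => console.log('error cb:', e), // not called for the throw above
});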
Just use the partial observer directly.\n partialObserver = observerOrNext;\n }\n }\n\n // Wrap the partial observer to ensure it's a full observer, and\n // make sure proper error handling is accounted for.\n this.destination = new ConsumerObserver(partialObserver);\n }\n}\n\nfunction handleUnhandledError(error: any) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n captureError(error);\n } else {\n // Ideal path, we report this as an unhandled error,\n // which is thrown on a new call stack.\n reportUnhandledError(error);\n }\n}\n\n/**\n * An error handler used when no error handler was supplied\n * to the SafeSubscriber -- meaning no error handler was supplied\n * do the `subscribe` call on our observable.\n * @param err The error to handle\n */\nfunction defaultErrorHandler(err: any) {\n throw err;\n}\n\n/**\n * A handler for notifications that cannot be sent to a stopped subscriber.\n * @param notification The notification being sent\n * @param subscriber The stopped subscriber\n */\nfunction handleStoppedNotification(notification: ObservableNotification, subscriber: Subscriber) {\n const { onStoppedNotification } = config;\n onStoppedNotification && timeoutProvider.setTimeout(() => onStoppedNotification(notification, subscriber));\n}\n\n/**\n * The observer used as a stub for subscriptions where the user did not\n * pass any arguments to `subscribe`. Comes with the default error handling\n * behavior.\n */\nexport const EMPTY_OBSERVER: Readonly> & { closed: true } = {\n closed: true,\n next: noop,\n error: defaultErrorHandler,\n complete: noop,\n};\n", "/**\n * Symbol.observable or a string \"@@observable\". Used for interop\n *\n * @deprecated We will no longer be exporting this symbol in upcoming versions of RxJS.\n * Instead polyfill and use Symbol.observable directly *or* use https://www.npmjs.com/package/symbol-observable\n */\nexport const observable: string | symbol = (() => (typeof Symbol === 'function' && Symbol.observable) || '@@observable')();\n", "/**\n * This function takes one parameter and just returns it. Simply put,\n * this is like `(x: T): T => x`.\n *\n * ## Examples\n *\n * This is useful in some cases when using things like `mergeMap`\n *\n * ```ts\n * import { interval, take, map, range, mergeMap, identity } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(5));\n *\n * const result$ = source$.pipe(\n * map(i => range(i)),\n * mergeMap(identity) // same as mergeMap(x => x)\n * );\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * Or when you want to selectively apply an operator\n *\n * ```ts\n * import { interval, take, identity } from 'rxjs';\n *\n * const shouldLimit = () => Math.random() < 0.5;\n *\n * const source$ = interval(1000);\n *\n * const result$ = source$.pipe(shouldLimit() ? 
take(5) : identity);\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * @param x Any value that is returned by this function\n * @returns The value passed as the first parameter to this function\n */\nexport function identity(x: T): T {\n return x;\n}\n", "import { identity } from './identity';\nimport { UnaryFunction } from '../types';\n\nexport function pipe(): typeof identity;\nexport function pipe(fn1: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction, fn3: UnaryFunction): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction,\n ...fns: UnaryFunction[]\n): UnaryFunction;\n\n/**\n * pipe() can be called on one or more functions, each of which can take one argument (\"UnaryFunction\")\n * and uses it to return a value.\n * It returns a function that takes one argument, passes it to the first UnaryFunction, and then\n * passes the result to the next one, passes that result to the next one, and so on. \n */\nexport function pipe(...fns: Array>): UnaryFunction {\n return pipeFromArray(fns);\n}\n\n/** @internal */\nexport function pipeFromArray(fns: Array>): UnaryFunction {\n if (fns.length === 0) {\n return identity as UnaryFunction;\n }\n\n if (fns.length === 1) {\n return fns[0];\n }\n\n return function piped(input: T): R {\n return fns.reduce((prev: any, fn: UnaryFunction) => fn(prev), input as any);\n };\n}\n", "import { Operator } from './Operator';\nimport { SafeSubscriber, Subscriber } from './Subscriber';\nimport { isSubscription, Subscription } from './Subscription';\nimport { TeardownLogic, OperatorFunction, Subscribable, Observer } from './types';\nimport { observable as Symbol_observable } from './symbol/observable';\nimport { pipeFromArray } from './util/pipe';\nimport { config } from './config';\nimport { isFunction } from './util/isFunction';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A representation of any set of values over any amount of time. This is the most basic building block\n * of RxJS.\n *\n * @class Observable\n */\nexport class Observable implements Subscribable {\n /**\n * @deprecated Internal implementation detail, do not use directly. 
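// A minimal sketch of the standalone `pipe` defined above, assuming the
// public rxjs entry point: pipeFromArray reduces left-to-right, so the
// result is g(f(x)); with no arguments it degenerates to `identity`.
import { pipe } from 'rxjs';

const addOneThenDouble = pipe(
  (n: number) => n + 1,
  (n: number) => n * 2
);
console.log(addOneThenDouble(3)); // 8
console.log(pipe()(42));          // identity: 42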
Will be made internal in v8.\n */\n source: Observable | undefined;\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n */\n operator: Operator | undefined;\n\n /**\n * @constructor\n * @param {Function} subscribe the function that is called when the Observable is\n * initially subscribed to. This function is given a Subscriber, to which new values\n * can be `next`ed, or an `error` method can be called to raise an error, or\n * `complete` can be called to notify of a successful completion.\n */\n constructor(subscribe?: (this: Observable, subscriber: Subscriber) => TeardownLogic) {\n if (subscribe) {\n this._subscribe = subscribe;\n }\n }\n\n // HACK: Since TypeScript inherits static properties too, we have to\n // fight against TypeScript here so Subject can have a different static create signature\n /**\n * Creates a new Observable by calling the Observable constructor\n * @owner Observable\n * @method create\n * @param {Function} subscribe? the subscriber function to be passed to the Observable constructor\n * @return {Observable} a new observable\n * @nocollapse\n * @deprecated Use `new Observable()` instead. Will be removed in v8.\n */\n static create: (...args: any[]) => any = (subscribe?: (subscriber: Subscriber) => TeardownLogic) => {\n return new Observable(subscribe);\n };\n\n /**\n * Creates a new Observable, with this Observable instance as the source, and the passed\n * operator defined as the new observable's operator.\n * @method lift\n * @param operator the operator defining the operation to take on the observable\n * @return a new observable with the Operator applied\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * If you have implemented an operator using `lift`, it is recommended that you create an\n * operator by simply returning `new Observable()` directly. See \"Creating new operators from\n * scratch\" section here: https://rxjs.dev/guide/operators\n */\n lift(operator?: Operator): Observable {\n const observable = new Observable();\n observable.source = this;\n observable.operator = operator;\n return observable;\n }\n\n subscribe(observerOrNext?: Partial> | ((value: T) => void)): Subscription;\n /** @deprecated Instead of passing separate callback arguments, use an observer argument. Signatures taking separate callback arguments will be removed in v8. Details: https://rxjs.dev/deprecations/subscribe-arguments */\n subscribe(next?: ((value: T) => void) | null, error?: ((error: any) => void) | null, complete?: (() => void) | null): Subscription;\n /**\n * Invokes an execution of an Observable and registers Observer handlers for notifications it will emit.\n *\n * Use it when you have all these Observables, but still nothing is happening.\n *\n * `subscribe` is not a regular operator, but a method that calls Observable's internal `subscribe` function. It\n * might be for example a function that you passed to Observable's constructor, but most of the time it is\n * a library implementation, which defines what will be emitted by an Observable, and when it be will emitted. This means\n * that calling `subscribe` is actually the moment when Observable starts its work, not when it is created, as it is often\n * the thought.\n *\n * Apart from starting the execution of an Observable, this method allows you to listen for values\n * that an Observable emits, as well as for when it completes or errors. 
You can achieve this in two\n * of the following ways.\n *\n * The first way is creating an object that implements {@link Observer} interface. It should have methods\n * defined by that interface, but note that it should be just a regular JavaScript object, which you can create\n * yourself in any way you want (ES6 class, classic function constructor, object literal etc.). In particular, do\n * not attempt to use any RxJS implementation details to create Observers - you don't need them. Remember also\n * that your object does not have to implement all methods. If you find yourself creating a method that doesn't\n * do anything, you can simply omit it. Note however, if the `error` method is not provided and an error happens,\n * it will be thrown asynchronously. Errors thrown asynchronously cannot be caught using `try`/`catch`. Instead,\n * use the {@link onUnhandledError} configuration option or use a runtime handler (like `window.onerror` or\n * `process.on('error)`) to be notified of unhandled errors. Because of this, it's recommended that you provide\n * an `error` method to avoid missing thrown errors.\n *\n * The second way is to give up on Observer object altogether and simply provide callback functions in place of its methods.\n * This means you can provide three functions as arguments to `subscribe`, where the first function is equivalent\n * of a `next` method, the second of an `error` method and the third of a `complete` method. Just as in case of an Observer,\n * if you do not need to listen for something, you can omit a function by passing `undefined` or `null`,\n * since `subscribe` recognizes these functions by where they were placed in function call. When it comes\n * to the `error` function, as with an Observer, if not provided, errors emitted by an Observable will be thrown asynchronously.\n *\n * You can, however, subscribe with no parameters at all. This may be the case where you're not interested in terminal events\n * and you also handled emissions internally by using operators (e.g. using `tap`).\n *\n * Whichever style of calling `subscribe` you use, in both cases it returns a Subscription object.\n * This object allows you to call `unsubscribe` on it, which in turn will stop the work that an Observable does and will clean\n * up all resources that an Observable used. Note that cancelling a subscription will not call `complete` callback\n * provided to `subscribe` function, which is reserved for a regular completion signal that comes from an Observable.\n *\n * Remember that callbacks provided to `subscribe` are not guaranteed to be called asynchronously.\n * It is an Observable itself that decides when these functions will be called. For example {@link of}\n * by default emits all its values synchronously. 
Always check documentation for how given Observable\n * will behave when subscribed and if its default behavior can be modified with a `scheduler`.\n *\n * #### Examples\n *\n * Subscribe with an {@link guide/observer Observer}\n *\n * ```ts\n * import { of } from 'rxjs';\n *\n * const sumObserver = {\n * sum: 0,\n * next(value) {\n * console.log('Adding: ' + value);\n * this.sum = this.sum + value;\n * },\n * error() {\n * // We actually could just remove this method,\n * // since we do not really care about errors right now.\n * },\n * complete() {\n * console.log('Sum equals: ' + this.sum);\n * }\n * };\n *\n * of(1, 2, 3) // Synchronously emits 1, 2, 3 and then completes.\n * .subscribe(sumObserver);\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Subscribe with functions ({@link deprecations/subscribe-arguments deprecated})\n *\n * ```ts\n * import { of } from 'rxjs'\n *\n * let sum = 0;\n *\n * of(1, 2, 3).subscribe(\n * value => {\n * console.log('Adding: ' + value);\n * sum = sum + value;\n * },\n * undefined,\n * () => console.log('Sum equals: ' + sum)\n * );\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Cancel a subscription\n *\n * ```ts\n * import { interval } from 'rxjs';\n *\n * const subscription = interval(1000).subscribe({\n * next(num) {\n * console.log(num)\n * },\n * complete() {\n * // Will not be called, even when cancelling subscription.\n * console.log('completed!');\n * }\n * });\n *\n * setTimeout(() => {\n * subscription.unsubscribe();\n * console.log('unsubscribed!');\n * }, 2500);\n *\n * // Logs:\n * // 0 after 1s\n * // 1 after 2s\n * // 'unsubscribed!' after 2.5s\n * ```\n *\n * @param {Observer|Function} observerOrNext (optional) Either an observer with methods to be called,\n * or the first of three possible handlers, which is the handler for each value emitted from the subscribed\n * Observable.\n * @param {Function} error (optional) A handler for a terminal event resulting from an error. If no error handler is provided,\n * the error will be thrown asynchronously as unhandled.\n * @param {Function} complete (optional) A handler for a terminal event resulting from successful completion.\n * @return {Subscription} a subscription reference to the registered handlers\n * @method subscribe\n */\n subscribe(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((error: any) => void) | null,\n complete?: (() => void) | null\n ): Subscription {\n const subscriber = isSubscriber(observerOrNext) ? observerOrNext : new SafeSubscriber(observerOrNext, error, complete);\n\n errorContext(() => {\n const { operator, source } = this;\n subscriber.add(\n operator\n ? // We're dealing with a subscription in the\n // operator chain to one of our lifted operators.\n operator.call(subscriber, source)\n : source\n ? // If `source` has a value, but `operator` does not, something that\n // had intimate knowledge of our API, like our `Subject`, must have\n // set it. 
We're going to just call `_subscribe` directly.\n this._subscribe(subscriber)\n : // In all other cases, we're likely wrapping a user-provided initializer\n // function, so we need to catch errors and handle them appropriately.\n this._trySubscribe(subscriber)\n );\n });\n\n return subscriber;\n }\n\n /** @internal */\n protected _trySubscribe(sink: Subscriber): TeardownLogic {\n try {\n return this._subscribe(sink);\n } catch (err) {\n // We don't need to return anything in this case,\n // because it's just going to try to `add()` to a subscription\n // above.\n sink.error(err);\n }\n }\n\n /**\n * Used as a NON-CANCELLABLE means of subscribing to an observable, for use with\n * APIs that expect promises, like `async/await`. You cannot unsubscribe from this.\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * #### Example\n *\n * ```ts\n * import { interval, take } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(4));\n *\n * async function getTotal() {\n * let total = 0;\n *\n * await source$.forEach(value => {\n * total += value;\n * console.log('observable -> ' + value);\n * });\n *\n * return total;\n * }\n *\n * getTotal().then(\n * total => console.log('Total: ' + total)\n * );\n *\n * // Expected:\n * // 'observable -> 0'\n * // 'observable -> 1'\n * // 'observable -> 2'\n * // 'observable -> 3'\n * // 'Total: 6'\n * ```\n *\n * @param next a handler for each value emitted by the observable\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n */\n forEach(next: (value: T) => void): Promise;\n\n /**\n * @param next a handler for each value emitted by the observable\n * @param promiseCtor a constructor function used to instantiate the Promise\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n * @deprecated Passing a Promise constructor will no longer be available\n * in upcoming versions of RxJS. This is because it adds weight to the library, for very\n * little benefit. If you need this functionality, it is recommended that you either\n * polyfill Promise, or you create an adapter to convert the returned native promise\n * to whatever promise implementation you wanted. 
Will be removed in v8.\n */\n forEach(next: (value: T) => void, promiseCtor: PromiseConstructorLike): Promise;\n\n forEach(next: (value: T) => void, promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n const subscriber = new SafeSubscriber({\n next: (value) => {\n try {\n next(value);\n } catch (err) {\n reject(err);\n subscriber.unsubscribe();\n }\n },\n error: reject,\n complete: resolve,\n });\n this.subscribe(subscriber);\n }) as Promise;\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): TeardownLogic {\n return this.source?.subscribe(subscriber);\n }\n\n /**\n * An interop point defined by the es7-observable spec https://github.com/zenparsing/es-observable\n * @method Symbol.observable\n * @return {Observable} this instance of the observable\n */\n [Symbol_observable]() {\n return this;\n }\n\n /* tslint:disable:max-line-length */\n pipe(): Observable;\n pipe(op1: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction, op3: OperatorFunction): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction,\n ...operations: OperatorFunction[]\n ): Observable;\n /* tslint:enable:max-line-length */\n\n /**\n * Used to stitch together functional operators into a chain.\n * @method pipe\n * @return {Observable} the Observable result of all of the operators having\n * been called in the order they were passed in.\n *\n * ## Example\n *\n * ```ts\n * import { interval, filter, map, scan } from 'rxjs';\n *\n * interval(1000)\n * .pipe(\n * filter(x => x % 2 === 0),\n * map(x => x + x),\n * scan((acc, x) => acc + x)\n * )\n * .subscribe(x => console.log(x));\n * ```\n */\n pipe(...operations: OperatorFunction[]): Observable {\n return pipeFromArray(operations)(this);\n }\n\n /* tslint:disable:max-line-length */\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. 
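// A minimal sketch of the replacements named in the deprecation notices
// above, assuming the public rxjs v7 entry point. One behavioral
// difference worth noting: lastValueFrom rejects with EmptyError for an
// empty source, whereas toPromise resolved with undefined.
import { firstValueFrom, lastValueFrom, of } from 'rxjs';

async function demo() {
  console.log(await lastValueFrom(of(1, 2, 3)));  // 3 (like toPromise)
  console.log(await firstValueFrom(of(1, 2, 3))); // 1
}
demo();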
Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: typeof Promise): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: PromiseConstructorLike): Promise;\n /* tslint:enable:max-line-length */\n\n /**\n * Subscribe to this Observable and get a Promise resolving on\n * `complete` with the last emission (if any).\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * @method toPromise\n * @param [promiseCtor] a constructor function used to instantiate\n * the Promise\n * @return A Promise that resolves with the last value emit, or\n * rejects on an error. If there were no emissions, Promise\n * resolves with undefined.\n * @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise\n */\n toPromise(promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n let value: T | undefined;\n this.subscribe(\n (x: T) => (value = x),\n (err: any) => reject(err),\n () => resolve(value)\n );\n }) as Promise;\n }\n}\n\n/**\n * Decides between a passed promise constructor from consuming code,\n * A default configured promise constructor, and the native promise\n * constructor and returns it. If nothing can be found, it will throw\n * an error.\n * @param promiseCtor The optional promise constructor to passed by consuming code\n */\nfunction getPromiseCtor(promiseCtor: PromiseConstructorLike | undefined) {\n return promiseCtor ?? config.Promise ?? Promise;\n}\n\nfunction isObserver(value: any): value is Observer {\n return value && isFunction(value.next) && isFunction(value.error) && isFunction(value.complete);\n}\n\nfunction isSubscriber(value: any): value is Subscriber {\n return (value && value instanceof Subscriber) || (isObserver(value) && isSubscription(value));\n}\n", "import { Observable } from '../Observable';\nimport { Subscriber } from '../Subscriber';\nimport { OperatorFunction } from '../types';\nimport { isFunction } from './isFunction';\n\n/**\n * Used to determine if an object is an Observable with a lift function.\n */\nexport function hasLift(source: any): source is { lift: InstanceType['lift'] } {\n return isFunction(source?.lift);\n}\n\n/**\n * Creates an `OperatorFunction`. 
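// A minimal interop sketch for the Symbol_observable hook above,
// assuming the public rxjs entry point exports `from` and the
// `observable` interop symbol: `from` adopts any object exposing it.
// The `foreign` object here is hypothetical.
import { from, observable } from 'rxjs';

const foreign: any = {};
foreign[observable] = () => ({
  subscribe(observer: any) {
    observer.next(42);
    observer.complete();
    return { unsubscribe() {} };
  },
});

from(foreign).subscribe((v: number) => console.log(v)); // 42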
Used to define operators throughout the library in a concise way.\n * @param init The logic to connect the liftedSource to the subscriber at the moment of subscription.\n */\nexport function operate(\n init: (liftedSource: Observable, subscriber: Subscriber) => (() => void) | void\n): OperatorFunction {\n return (source: Observable) => {\n if (hasLift(source)) {\n return source.lift(function (this: Subscriber, liftedSource: Observable) {\n try {\n return init(liftedSource, this);\n } catch (err) {\n this.error(err);\n }\n });\n }\n throw new TypeError('Unable to lift unknown Observable type');\n };\n}\n", "import { Subscriber } from '../Subscriber';\n\n/**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional teardown logic here. This will only be called on teardown if the\n * subscriber itself is not already closed. This is called after all other teardown logic is executed.\n */\nexport function createOperatorSubscriber(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n onFinalize?: () => void\n): Subscriber {\n return new OperatorSubscriber(destination, onNext, onComplete, onError, onFinalize);\n}\n\n/**\n * A generic helper for allowing operators to be created with a Subscriber and\n * use closures to capture necessary state from the operator function itself.\n */\nexport class OperatorSubscriber extends Subscriber {\n /**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional finalization logic here. This will only be called on finalization if the\n * subscriber itself is not already closed. This is called after all other finalization logic is executed.\n * @param shouldUnsubscribe An optional check to see if an unsubscribe call should truly unsubscribe.\n * NOTE: This currently **ONLY** exists to support the strange behavior of {@link groupBy}, where unsubscription\n * to the resulting observable does not actually disconnect from the source if there are active subscriptions\n * to any grouped observable. 
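// `operate` and createOperatorSubscriber here are internal helpers. A
// minimal sketch of the pattern the `lift` deprecation notice
// recommends for user-land operators instead, assuming the public rxjs
// entry point: return a new Observable and forward notifications to the
// downstream subscriber.
import { Observable, OperatorFunction } from 'rxjs';

function double(): OperatorFunction<number, number> {
  return (source) =>
    new Observable<number>((subscriber) =>
      source.subscribe({
        next: (v) => subscriber.next(v * 2),
        error: (e) => subscriber.error(e),
        complete: () => subscriber.complete(),
      })
    );
}
// of(1, 2).pipe(double()) would emit 2 and 4.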
(DO NOT EXPOSE OR USE EXTERNALLY!!!)\n */\n constructor(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n private onFinalize?: () => void,\n private shouldUnsubscribe?: () => boolean\n ) {\n // It's important - for performance reasons - that all of this class's\n // members are initialized and that they are always initialized in the same\n // order. This will ensure that all OperatorSubscriber instances have the\n // same hidden class in V8. This, in turn, will help keep the number of\n // hidden classes involved in property accesses within the base class as\n // low as possible. If the number of hidden classes involved exceeds four,\n // the property accesses will become megamorphic and performance penalties\n // will be incurred - i.e. inline caches won't be used.\n //\n // The reasons for ensuring all instances have the same hidden class are\n // further discussed in this blog post from Benedikt Meurer:\n // https://benediktmeurer.de/2018/03/23/impact-of-polymorphism-on-component-based-frameworks-like-react/\n super(destination);\n this._next = onNext\n ? function (this: OperatorSubscriber, value: T) {\n try {\n onNext(value);\n } catch (err) {\n destination.error(err);\n }\n }\n : super._next;\n this._error = onError\n ? function (this: OperatorSubscriber, err: any) {\n try {\n onError(err);\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._error;\n this._complete = onComplete\n ? function (this: OperatorSubscriber) {\n try {\n onComplete();\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._complete;\n }\n\n unsubscribe() {\n if (!this.shouldUnsubscribe || this.shouldUnsubscribe()) {\n const { closed } = this;\n super.unsubscribe();\n // Execute additional teardown if we have any and we didn't already do so.\n !closed && this.onFinalize?.();\n }\n }\n}\n", "import { Subscription } from '../Subscription';\n\ninterface AnimationFrameProvider {\n schedule(callback: FrameRequestCallback): Subscription;\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n delegate:\n | {\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n }\n | undefined;\n}\n\nexport const animationFrameProvider: AnimationFrameProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n schedule(callback) {\n let request = requestAnimationFrame;\n let cancel: typeof cancelAnimationFrame | undefined = cancelAnimationFrame;\n const { delegate } = animationFrameProvider;\n if (delegate) {\n request = delegate.requestAnimationFrame;\n cancel = delegate.cancelAnimationFrame;\n }\n const handle = request((timestamp) => {\n // Clear the cancel function. 
The request has been fulfilled, so\n // attempting to cancel the request upon unsubscription would be\n // pointless.\n cancel = undefined;\n callback(timestamp);\n });\n return new Subscription(() => cancel?.(handle));\n },\n requestAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.requestAnimationFrame || requestAnimationFrame)(...args);\n },\n cancelAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.cancelAnimationFrame || cancelAnimationFrame)(...args);\n },\n delegate: undefined,\n};\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface ObjectUnsubscribedError extends Error {}\n\nexport interface ObjectUnsubscribedErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (): ObjectUnsubscribedError;\n}\n\n/**\n * An error thrown when an action is invalid because the object has been\n * unsubscribed.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n *\n * @class ObjectUnsubscribedError\n */\nexport const ObjectUnsubscribedError: ObjectUnsubscribedErrorCtor = createErrorClass(\n (_super) =>\n function ObjectUnsubscribedErrorImpl(this: any) {\n _super(this);\n this.name = 'ObjectUnsubscribedError';\n this.message = 'object unsubscribed';\n }\n);\n", "import { Operator } from './Operator';\nimport { Observable } from './Observable';\nimport { Subscriber } from './Subscriber';\nimport { Subscription, EMPTY_SUBSCRIPTION } from './Subscription';\nimport { Observer, SubscriptionLike, TeardownLogic } from './types';\nimport { ObjectUnsubscribedError } from './util/ObjectUnsubscribedError';\nimport { arrRemove } from './util/arrRemove';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A Subject is a special type of Observable that allows values to be\n * multicasted to many Observers. Subjects are like EventEmitters.\n *\n * Every Subject is an Observable and an Observer. You can subscribe to a\n * Subject, and you can call next to feed values as well as error and complete.\n */\nexport class Subject extends Observable implements SubscriptionLike {\n closed = false;\n\n private currentObservers: Observer[] | null = null;\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n observers: Observer[] = [];\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n isStopped = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n hasError = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n thrownError: any = null;\n\n /**\n * Creates a \"subject\" by basically gluing an observer to an observable.\n *\n * @nocollapse\n * @deprecated Recommended you do not use. Will be removed at some point in the future. Plans for replacement still under discussion.\n */\n static create: (...args: any[]) => any = (destination: Observer, source: Observable): AnonymousSubject => {\n return new AnonymousSubject(destination, source);\n };\n\n constructor() {\n // NOTE: This must be here to obscure Observable's constructor.\n super();\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. 
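// A minimal multicast sketch for the Subject above, assuming the public
// rxjs entry point: every current observer receives each `next`, and a
// late subscriber is finalized immediately via _checkFinalizedStatuses.
import { Subject } from 'rxjs';

const subject = new Subject<number>();
subject.subscribe((v) => console.log('A', v));
subject.subscribe((v) => console.log('B', v));
subject.next(1); // logs: A 1, B 1
subject.complete();
subject.subscribe({ complete: () => console.log('late complete') }); // immediate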
*/\n lift(operator: Operator): Observable {\n const subject = new AnonymousSubject(this, this);\n subject.operator = operator as any;\n return subject as any;\n }\n\n /** @internal */\n protected _throwIfClosed() {\n if (this.closed) {\n throw new ObjectUnsubscribedError();\n }\n }\n\n next(value: T) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n if (!this.currentObservers) {\n this.currentObservers = Array.from(this.observers);\n }\n for (const observer of this.currentObservers) {\n observer.next(value);\n }\n }\n });\n }\n\n error(err: any) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.hasError = this.isStopped = true;\n this.thrownError = err;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.error(err);\n }\n }\n });\n }\n\n complete() {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.isStopped = true;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.complete();\n }\n }\n });\n }\n\n unsubscribe() {\n this.isStopped = this.closed = true;\n this.observers = this.currentObservers = null!;\n }\n\n get observed() {\n return this.observers?.length > 0;\n }\n\n /** @internal */\n protected _trySubscribe(subscriber: Subscriber): TeardownLogic {\n this._throwIfClosed();\n return super._trySubscribe(subscriber);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._checkFinalizedStatuses(subscriber);\n return this._innerSubscribe(subscriber);\n }\n\n /** @internal */\n protected _innerSubscribe(subscriber: Subscriber) {\n const { hasError, isStopped, observers } = this;\n if (hasError || isStopped) {\n return EMPTY_SUBSCRIPTION;\n }\n this.currentObservers = null;\n observers.push(subscriber);\n return new Subscription(() => {\n this.currentObservers = null;\n arrRemove(observers, subscriber);\n });\n }\n\n /** @internal */\n protected _checkFinalizedStatuses(subscriber: Subscriber) {\n const { hasError, thrownError, isStopped } = this;\n if (hasError) {\n subscriber.error(thrownError);\n } else if (isStopped) {\n subscriber.complete();\n }\n }\n\n /**\n * Creates a new Observable with this Subject as the source. You can do this\n * to create custom Observer-side logic of the Subject and conceal it from\n * code that uses the Observable.\n * @return {Observable} Observable that the Subject casts to\n */\n asObservable(): Observable {\n const observable: any = new Observable();\n observable.source = this;\n return observable;\n }\n}\n\n/**\n * @class AnonymousSubject\n */\nexport class AnonymousSubject extends Subject {\n constructor(\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n public destination?: Observer,\n source?: Observable\n ) {\n super();\n this.source = source;\n }\n\n next(value: T) {\n this.destination?.next?.(value);\n }\n\n error(err: any) {\n this.destination?.error?.(err);\n }\n\n complete() {\n this.destination?.complete?.();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n return this.source?.subscribe(subscriber) ?? 
EMPTY_SUBSCRIPTION;\n }\n}\n", "import { Subject } from './Subject';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\n\n/**\n * A variant of Subject that requires an initial value and emits its current\n * value whenever it is subscribed to.\n *\n * @class BehaviorSubject\n */\nexport class BehaviorSubject extends Subject {\n constructor(private _value: T) {\n super();\n }\n\n get value(): T {\n return this.getValue();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n const subscription = super._subscribe(subscriber);\n !subscription.closed && subscriber.next(this._value);\n return subscription;\n }\n\n getValue(): T {\n const { hasError, thrownError, _value } = this;\n if (hasError) {\n throw thrownError;\n }\n this._throwIfClosed();\n return _value;\n }\n\n next(value: T): void {\n super.next((this._value = value));\n }\n}\n", "import { TimestampProvider } from '../types';\n\ninterface DateTimestampProvider extends TimestampProvider {\n delegate: TimestampProvider | undefined;\n}\n\nexport const dateTimestampProvider: DateTimestampProvider = {\n now() {\n // Use the variable rather than `this` so that the function can be called\n // without being bound to the provider.\n return (dateTimestampProvider.delegate || Date).now();\n },\n delegate: undefined,\n};\n", "import { Subject } from './Subject';\nimport { TimestampProvider } from './types';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * A variant of {@link Subject} that \"replays\" old values to new subscribers by emitting them when they first subscribe.\n *\n * `ReplaySubject` has an internal buffer that will store a specified number of values that it has observed. Like `Subject`,\n * `ReplaySubject` \"observes\" values by having them passed to its `next` method. When it observes a value, it will store that\n * value for a time determined by the configuration of the `ReplaySubject`, as passed to its constructor.\n *\n * When a new subscriber subscribes to the `ReplaySubject` instance, it will synchronously emit all values in its buffer in\n * a First-In-First-Out (FIFO) manner. The `ReplaySubject` will also complete, if it has observed completion; and it will\n * error if it has observed an error.\n *\n * There are two main configuration items to be concerned with:\n *\n * 1. `bufferSize` - This will determine how many items are stored in the buffer, defaults to infinite.\n * 2. `windowTime` - The amount of time to hold a value in the buffer before removing it from the buffer.\n *\n * Both configurations may exist simultaneously. So if you would like to buffer a maximum of 3 values, as long as the values\n * are less than 2 seconds old, you could do so with a `new ReplaySubject(3, 2000)`.\n *\n * ### Differences with BehaviorSubject\n *\n * `BehaviorSubject` is similar to `new ReplaySubject(1)`, with a couple of exceptions:\n *\n * 1. `BehaviorSubject` comes \"primed\" with a single value upon construction.\n * 2. 
`ReplaySubject` will replay values, even after observing an error, where `BehaviorSubject` will not.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n * @see {@link shareReplay}\n */\nexport class ReplaySubject extends Subject {\n private _buffer: (T | number)[] = [];\n private _infiniteTimeWindow = true;\n\n /**\n * @param bufferSize The size of the buffer to replay on subscription\n * @param windowTime The amount of time the buffered items will stay buffered\n * @param timestampProvider An object with a `now()` method that provides the current timestamp. This is used to\n * calculate the amount of time something has been buffered.\n */\n constructor(\n private _bufferSize = Infinity,\n private _windowTime = Infinity,\n private _timestampProvider: TimestampProvider = dateTimestampProvider\n ) {\n super();\n this._infiniteTimeWindow = _windowTime === Infinity;\n this._bufferSize = Math.max(1, _bufferSize);\n this._windowTime = Math.max(1, _windowTime);\n }\n\n next(value: T): void {\n const { isStopped, _buffer, _infiniteTimeWindow, _timestampProvider, _windowTime } = this;\n if (!isStopped) {\n _buffer.push(value);\n !_infiniteTimeWindow && _buffer.push(_timestampProvider.now() + _windowTime);\n }\n this._trimBuffer();\n super.next(value);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._trimBuffer();\n\n const subscription = this._innerSubscribe(subscriber);\n\n const { _infiniteTimeWindow, _buffer } = this;\n // We use a copy here, so reentrant code does not mutate our array while we're\n // emitting it to a new subscriber.\n const copy = _buffer.slice();\n for (let i = 0; i < copy.length && !subscriber.closed; i += _infiniteTimeWindow ? 1 : 2) {\n subscriber.next(copy[i] as T);\n }\n\n this._checkFinalizedStatuses(subscriber);\n\n return subscription;\n }\n\n private _trimBuffer() {\n const { _bufferSize, _timestampProvider, _buffer, _infiniteTimeWindow } = this;\n // If we don't have an infinite buffer size, and we're over the length,\n // use splice to truncate the old buffer values off. Note that we have to\n // double the size for instances where we're not using an infinite time window\n // because we're storing the values and the timestamps in the same array.\n const adjustedBufferSize = (_infiniteTimeWindow ? 1 : 2) * _bufferSize;\n _bufferSize < Infinity && adjustedBufferSize < _buffer.length && _buffer.splice(0, _buffer.length - adjustedBufferSize);\n\n // Now, if we're not in an infinite time window, remove all values where the time is\n // older than what is allowed.\n if (!_infiniteTimeWindow) {\n const now = _timestampProvider.now();\n let last = 0;\n // Search the array for the first timestamp that isn't expired and\n // truncate the buffer up to that point.\n for (let i = 1; i < _buffer.length && (_buffer[i] as number) <= now; i += 2) {\n last = i;\n }\n last && _buffer.splice(0, last + 1);\n }\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Subscription } from '../Subscription';\nimport { SchedulerAction } from '../types';\n\n/**\n * A unit of work to be executed in a `scheduler`. 
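// A minimal sketch of the BehaviorSubject/ReplaySubject contrast drawn
// above, assuming the public rxjs entry point.
import { BehaviorSubject, ReplaySubject } from 'rxjs';

const state$ = new BehaviorSubject(0);            // "primed" with 0
state$.subscribe((v) => console.log('state', v)); // immediately logs: state 0
state$.next(1);                                   // state 1; state$.value === 1

const replay$ = new ReplaySubject<number>(3, 2000); // <=3 values, <=2000 ms old
replay$.next(1); replay$.next(2); replay$.next(3); replay$.next(4);
replay$.subscribe((v) => console.log(v)); // synchronously logs 2, 3, 4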
An action is typically\n * created from within a {@link SchedulerLike} and an RxJS user does not need to concern\n * themselves about creating and manipulating an Action.\n *\n * ```ts\n * class Action extends Subscription {\n * new (scheduler: Scheduler, work: (state?: T) => void);\n * schedule(state?: T, delay: number = 0): Subscription;\n * }\n * ```\n *\n * @class Action\n */\nexport class Action extends Subscription {\n constructor(scheduler: Scheduler, work: (this: SchedulerAction, state?: T) => void) {\n super();\n }\n /**\n * Schedules this action on its parent {@link SchedulerLike} for execution. May be passed\n * some context object, `state`. May happen at some point in the future,\n * according to the `delay` parameter, if specified.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler.\n * @return {void}\n */\n public schedule(state?: T, delay: number = 0): Subscription {\n return this;\n }\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetIntervalFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearIntervalFunction = (handle: TimerHandle) => void;\n\ninterface IntervalProvider {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n delegate:\n | {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n }\n | undefined;\n}\n\nexport const intervalProvider: IntervalProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setInterval(handler: () => void, timeout?: number, ...args) {\n const { delegate } = intervalProvider;\n if (delegate?.setInterval) {\n return delegate.setInterval(handler, timeout, ...args);\n }\n return setInterval(handler, timeout, ...args);\n },\n clearInterval(handle) {\n const { delegate } = intervalProvider;\n return (delegate?.clearInterval || clearInterval)(handle as any);\n },\n delegate: undefined,\n};\n", "import { Action } from './Action';\nimport { SchedulerAction } from '../types';\nimport { Subscription } from '../Subscription';\nimport { AsyncScheduler } from './AsyncScheduler';\nimport { intervalProvider } from './intervalProvider';\nimport { arrRemove } from '../util/arrRemove';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncAction extends Action {\n public id: TimerHandle | undefined;\n public state?: T;\n // @ts-ignore: Property has no initializer and is not definitely assigned\n public delay: number;\n protected pending: boolean = false;\n\n constructor(protected scheduler: AsyncScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (this.closed) {\n return this;\n }\n\n // Always replace the current state with the new state.\n this.state = state;\n\n const id = this.id;\n const scheduler = this.scheduler;\n\n //\n // Important implementation note:\n //\n // Actions only execute once by default, unless rescheduled from within the\n // scheduled callback. 
This allows us to implement single and repeat\n // actions via the same code path, without adding API surface area, as well\n // as mimic traditional recursion but across asynchronous boundaries.\n //\n // However, JS runtimes and timers distinguish between intervals achieved by\n // serial `setTimeout` calls vs. a single `setInterval` call. An interval of\n // serial `setTimeout` calls can be individually delayed, which delays\n // scheduling the next `setTimeout`, and so on. `setInterval` attempts to\n // guarantee the interval callback will be invoked more precisely to the\n // interval period, regardless of load.\n //\n // Therefore, we use `setInterval` to schedule single and repeat actions.\n // If the action reschedules itself with the same delay, the interval is not\n // canceled. If the action doesn't reschedule, or reschedules with a\n // different delay, the interval will be canceled after scheduled callback\n // execution.\n //\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, delay);\n }\n\n // Set the pending flag indicating that this action has been scheduled, or\n // has recursively rescheduled itself.\n this.pending = true;\n\n this.delay = delay;\n // If this action has already an async Id, don't request a new one.\n this.id = this.id ?? this.requestAsyncId(scheduler, this.id, delay);\n\n return this;\n }\n\n protected requestAsyncId(scheduler: AsyncScheduler, _id?: TimerHandle, delay: number = 0): TimerHandle {\n return intervalProvider.setInterval(scheduler.flush.bind(scheduler, this), delay);\n }\n\n protected recycleAsyncId(_scheduler: AsyncScheduler, id?: TimerHandle, delay: number | null = 0): TimerHandle | undefined {\n // If this action is rescheduled with the same delay time, don't clear the interval id.\n if (delay != null && this.delay === delay && this.pending === false) {\n return id;\n }\n // Otherwise, if the action's delay time is different from the current delay,\n // or the action has been rescheduled before it's executed, clear the interval id\n if (id != null) {\n intervalProvider.clearInterval(id);\n }\n\n return undefined;\n }\n\n /**\n * Immediately executes this action and the `work` it contains.\n * @return {any}\n */\n public execute(state: T, delay: number): any {\n if (this.closed) {\n return new Error('executing a cancelled action');\n }\n\n this.pending = false;\n const error = this._execute(state, delay);\n if (error) {\n return error;\n } else if (this.pending === false && this.id != null) {\n // Dequeue if the action didn't reschedule itself. Don't call\n // unsubscribe(), because the action could reschedule later.\n // For example:\n // ```\n // scheduler.schedule(function doWork(counter) {\n // /* ... I'm a busy worker bee ... */\n // var originalAction = this;\n // /* wait 100ms before rescheduling the action */\n // setTimeout(function () {\n // originalAction.schedule(counter + 1);\n // }, 100);\n // }, 1000);\n // ```\n this.id = this.recycleAsyncId(this.scheduler, this.id, null);\n }\n }\n\n protected _execute(state: T, _delay: number): any {\n let errored: boolean = false;\n let errorValue: any;\n try {\n this.work(state);\n } catch (e) {\n errored = true;\n // HACK: Since code elsewhere is relying on the \"truthiness\" of the\n // return here, we can't have it return \"\" or 0 or false.\n // TODO: Clean this up when we refactor schedulers mid-version-8 or so.\n errorValue = e ? 
e : new Error('Scheduled action threw falsy error');\n }\n if (errored) {\n this.unsubscribe();\n return errorValue;\n }\n }\n\n unsubscribe() {\n if (!this.closed) {\n const { id, scheduler } = this;\n const { actions } = scheduler;\n\n this.work = this.state = this.scheduler = null!;\n this.pending = false;\n\n arrRemove(actions, this);\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, null);\n }\n\n this.delay = null!;\n super.unsubscribe();\n }\n }\n}\n", "import { Action } from './scheduler/Action';\nimport { Subscription } from './Subscription';\nimport { SchedulerLike, SchedulerAction } from './types';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * An execution context and a data structure to order tasks and schedule their\n * execution. Provides a notion of (potentially virtual) time, through the\n * `now()` getter method.\n *\n * Each unit of work in a Scheduler is called an `Action`.\n *\n * ```ts\n * class Scheduler {\n * now(): number;\n * schedule(work, delay?, state?): Subscription;\n * }\n * ```\n *\n * @class Scheduler\n * @deprecated Scheduler is an internal implementation detail of RxJS, and\n * should not be used directly. Rather, create your own class and implement\n * {@link SchedulerLike}. Will be made internal in v8.\n */\nexport class Scheduler implements SchedulerLike {\n public static now: () => number = dateTimestampProvider.now;\n\n constructor(private schedulerActionCtor: typeof Action, now: () => number = Scheduler.now) {\n this.now = now;\n }\n\n /**\n * A getter method that returns a number representing the current time\n * (at the time this function was called) according to the scheduler's own\n * internal clock.\n * @return {number} A number that represents the current time. May or may not\n * have a relation to wall-clock time. May or may not refer to a time unit\n * (e.g. milliseconds).\n */\n public now: () => number;\n\n /**\n * Schedules a function, `work`, for execution. May happen at some point in\n * the future, according to the `delay` parameter, if specified. 
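// A minimal sketch of what the deprecation notice above recommends:
// implement SchedulerLike yourself rather than extending Scheduler.
// Assumes the public rxjs type exports; recursive rescheduling through
// the SchedulerAction `this` is deliberately not supported in this
// sketch.
import { SchedulerLike, SchedulerAction, Subscription } from 'rxjs';

const timeoutScheduler: SchedulerLike = {
  now: () => Date.now(),
  schedule<T>(work: (this: SchedulerAction<T>, state?: T) => void, delay = 0, state?: T): Subscription {
    const subscription = new Subscription();
    const handle = setTimeout(() => work.call(subscription as any, state), delay);
    subscription.add(() => clearTimeout(handle));
    return subscription;
  },
};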
May be passed\n * some context object, `state`, which will be passed to the `work` function.\n *\n * The given arguments will be processed and stored as an Action object in a\n * queue of actions.\n *\n * @param {function(state: ?T): ?Subscription} work A function representing a\n * task, or some unit of work to be executed by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler itself.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @return {Subscription} A subscription in order to be able to unsubscribe\n * the scheduled work.\n */\n public schedule(work: (this: SchedulerAction, state?: T) => void, delay: number = 0, state?: T): Subscription {\n return new this.schedulerActionCtor(this, work).schedule(state, delay);\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Action } from './Action';\nimport { AsyncAction } from './AsyncAction';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncScheduler extends Scheduler {\n public actions: Array> = [];\n /**\n * A flag to indicate whether the Scheduler is currently executing a batch of\n * queued actions.\n * @type {boolean}\n * @internal\n */\n public _active: boolean = false;\n /**\n * An internal ID used to track the latest asynchronous task such as those\n * coming from `setTimeout`, `setInterval`, `requestAnimationFrame`, and\n * others.\n * @type {any}\n * @internal\n */\n public _scheduled: TimerHandle | undefined;\n\n constructor(SchedulerAction: typeof Action, now: () => number = Scheduler.now) {\n super(SchedulerAction, now);\n }\n\n public flush(action: AsyncAction): void {\n const { actions } = this;\n\n if (this._active) {\n actions.push(action);\n return;\n }\n\n let error: any;\n this._active = true;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions.shift()!)); // exhaust the scheduler queue\n\n this._active = false;\n\n if (error) {\n while ((action = actions.shift()!)) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\n/**\n *\n * Async Scheduler\n *\n * Schedules a task as if you used setTimeout(task, duration)\n *\n * `async` scheduler schedules tasks asynchronously, by putting them on the JavaScript\n * event loop queue. 
It is best used to delay tasks in time or to schedule tasks repeating\n * in intervals.\n *\n * If you just want to \"defer\" task, that is to perform it right after currently\n * executing synchronous code ends (commonly achieved by `setTimeout(deferredTask, 0)`),\n * better choice will be the {@link asapScheduler} scheduler.\n *\n * ## Examples\n * Use async scheduler to delay task\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * const task = () => console.log('it works!');\n *\n * asyncScheduler.schedule(task, 2000);\n *\n * // After 2 seconds logs:\n * // \"it works!\"\n * ```\n *\n * Use async scheduler to repeat task in intervals\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * function task(state) {\n * console.log(state);\n * this.schedule(state + 1, 1000); // `this` references currently executing Action,\n * // which we reschedule with new state and delay\n * }\n *\n * asyncScheduler.schedule(task, 3000, 0);\n *\n * // Logs:\n * // 0 after 3s\n * // 1 after 4s\n * // 2 after 5s\n * // 3 after 6s\n * ```\n */\n\nexport const asyncScheduler = new AsyncScheduler(AsyncAction);\n\n/**\n * @deprecated Renamed to {@link asyncScheduler}. Will be removed in v8.\n */\nexport const async = asyncScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { Subscription } from '../Subscription';\nimport { QueueScheduler } from './QueueScheduler';\nimport { SchedulerAction } from '../types';\nimport { TimerHandle } from './timerHandle';\n\nexport class QueueAction extends AsyncAction {\n constructor(protected scheduler: QueueScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (delay > 0) {\n return super.schedule(state, delay);\n }\n this.delay = delay;\n this.state = state;\n this.scheduler.flush(this);\n return this;\n }\n\n public execute(state: T, delay: number): any {\n return delay > 0 || this.closed ? super.execute(state, delay) : this._execute(state, delay);\n }\n\n protected requestAsyncId(scheduler: QueueScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n\n if ((delay != null && delay > 0) || (delay == null && this.delay > 0)) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n\n // Otherwise flush the scheduler starting with this action.\n scheduler.flush(this);\n\n // HACK: In the past, this was returning `void`. However, `void` isn't a valid\n // `TimerHandle`, and generally the return value here isn't really used. So the\n // compromise is to return `0` which is both \"falsy\" and a valid `TimerHandle`,\n // as opposed to refactoring every other instanceo of `requestAsyncId`.\n return 0;\n }\n}\n", "import { AsyncScheduler } from './AsyncScheduler';\n\nexport class QueueScheduler extends AsyncScheduler {\n}\n", "import { QueueAction } from './QueueAction';\nimport { QueueScheduler } from './QueueScheduler';\n\n/**\n *\n * Queue Scheduler\n *\n * Put every next task on a queue, instead of executing it immediately\n *\n * `queue` scheduler, when used with delay, behaves the same as {@link asyncScheduler} scheduler.\n *\n * When used without delay, it schedules given task synchronously - executes it right when\n * it is scheduled. 
However when called recursively, that is when inside the scheduled task,\n * another task is scheduled with queue scheduler, instead of executing immediately as well,\n * that task will be put on a queue and wait for current one to finish.\n *\n * This means that when you execute task with `queue` scheduler, you are sure it will end\n * before any other task scheduled with that scheduler will start.\n *\n * ## Examples\n * Schedule recursively first, then do something\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(() => {\n * queueScheduler.schedule(() => console.log('second')); // will not happen now, but will be put on a queue\n *\n * console.log('first');\n * });\n *\n * // Logs:\n * // \"first\"\n * // \"second\"\n * ```\n *\n * Reschedule itself recursively\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(function(state) {\n * if (state !== 0) {\n * console.log('before', state);\n * this.schedule(state - 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * console.log('after', state);\n * }\n * }, 0, 3);\n *\n * // In scheduler that runs recursively, you would expect:\n * // \"before\", 3\n * // \"before\", 2\n * // \"before\", 1\n * // \"after\", 1\n * // \"after\", 2\n * // \"after\", 3\n *\n * // But with queue it logs:\n * // \"before\", 3\n * // \"after\", 3\n * // \"before\", 2\n * // \"after\", 2\n * // \"before\", 1\n * // \"after\", 1\n * ```\n */\n\nexport const queueScheduler = new QueueScheduler(QueueAction);\n\n/**\n * @deprecated Renamed to {@link queueScheduler}. Will be removed in v8.\n */\nexport const queue = queueScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\nimport { SchedulerAction } from '../types';\nimport { animationFrameProvider } from './animationFrameProvider';\nimport { TimerHandle } from './timerHandle';\n\nexport class AnimationFrameAction extends AsyncAction {\n constructor(protected scheduler: AnimationFrameScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n protected requestAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay is greater than 0, request as an async action.\n if (delay !== null && delay > 0) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n // Push the action to the end of the scheduler queue.\n scheduler.actions.push(this);\n // If an animation frame has already been requested, don't request another\n // one. If an animation frame hasn't been requested yet, request one. Return\n // the current animation frame request id.\n return scheduler._scheduled || (scheduler._scheduled = animationFrameProvider.requestAnimationFrame(() => scheduler.flush(undefined)));\n }\n\n protected recycleAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle | undefined {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n if (delay != null ? 
delay > 0 : this.delay > 0) {\n return super.recycleAsyncId(scheduler, id, delay);\n }\n // If the scheduler queue has no remaining actions with the same async id,\n // cancel the requested animation frame and set the scheduled flag to\n // undefined so the next AnimationFrameAction will request its own.\n const { actions } = scheduler;\n if (id != null && actions[actions.length - 1]?.id !== id) {\n animationFrameProvider.cancelAnimationFrame(id as number);\n scheduler._scheduled = undefined;\n }\n // Return undefined so the action knows to request a new async id if it's rescheduled.\n return undefined;\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\nexport class AnimationFrameScheduler extends AsyncScheduler {\n public flush(action?: AsyncAction): void {\n this._active = true;\n // The async id that effects a call to flush is stored in _scheduled.\n // Before executing an action, it's necessary to check the action's async\n // id to determine whether it's supposed to be executed in the current\n // flush.\n // Previous implementations of this method used a count to determine this,\n // but that was unsound, as actions that are unsubscribed - i.e. cancelled -\n // are removed from the actions array and that can shift actions that are\n // scheduled to be executed in a subsequent flush into positions at which\n // they are executed within the current flush.\n const flushId = this._scheduled;\n this._scheduled = undefined;\n\n const { actions } = this;\n let error: any;\n action = action || actions.shift()!;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions[0]) && action.id === flushId && actions.shift());\n\n this._active = false;\n\n if (error) {\n while ((action = actions[0]) && action.id === flushId && actions.shift()) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AnimationFrameAction } from './AnimationFrameAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\n\n/**\n *\n * Animation Frame Scheduler\n *\n * Perform task when `window.requestAnimationFrame` would fire\n *\n * When `animationFrame` scheduler is used with delay, it will fall back to {@link asyncScheduler} scheduler\n * behaviour.\n *\n * Without delay, `animationFrame` scheduler can be used to create smooth browser animations.\n * It makes sure scheduled task will happen just before next browser content repaint,\n * thus performing animations as efficiently as possible.\n *\n * ## Example\n * Schedule div height animation\n * ```ts\n * // html:
\n * import { animationFrameScheduler } from 'rxjs';\n *\n * const div = document.querySelector('div');\n *\n * animationFrameScheduler.schedule(function(height) {\n * div.style.height = height + \"px\";\n *\n * this.schedule(height + 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * }, 0, 0);\n *\n * // You will see a div element growing in height\n * ```\n */\n\nexport const animationFrameScheduler = new AnimationFrameScheduler(AnimationFrameAction);\n\n/**\n * @deprecated Renamed to {@link animationFrameScheduler}. Will be removed in v8.\n */\nexport const animationFrame = animationFrameScheduler;\n", "import { Observable } from '../Observable';\nimport { SchedulerLike } from '../types';\n\n/**\n * A simple Observable that emits no items to the Observer and immediately\n * emits a complete notification.\n *\n * Just emits 'complete', and nothing else.\n *\n * ![](empty.png)\n *\n * A simple Observable that only emits the complete notification. It can be used\n * for composing with other Observables, such as in a {@link mergeMap}.\n *\n * ## Examples\n *\n * Log complete notification\n *\n * ```ts\n * import { EMPTY } from 'rxjs';\n *\n * EMPTY.subscribe({\n * next: () => console.log('Next'),\n * complete: () => console.log('Complete!')\n * });\n *\n * // Outputs\n * // Complete!\n * ```\n *\n * Emit the number 7, then complete\n *\n * ```ts\n * import { EMPTY, startWith } from 'rxjs';\n *\n * const result = EMPTY.pipe(startWith(7));\n * result.subscribe(x => console.log(x));\n *\n * // Outputs\n * // 7\n * ```\n *\n * Map and flatten only odd numbers to the sequence `'a'`, `'b'`, `'c'`\n *\n * ```ts\n * import { interval, mergeMap, of, EMPTY } from 'rxjs';\n *\n * const interval$ = interval(1000);\n * const result = interval$.pipe(\n * mergeMap(x => x % 2 === 1 ? of('a', 'b', 'c') : EMPTY),\n * );\n * result.subscribe(x => console.log(x));\n *\n * // Results in the following to the console:\n * // x is equal to the count on the interval, e.g. (0, 1, 2, 3, ...)\n * // x will occur every 1000ms\n * // if x % 2 is equal to 1, print a, b, c (each on its own)\n * // if x % 2 is not equal to 1, nothing will be output\n * ```\n *\n * @see {@link Observable}\n * @see {@link NEVER}\n * @see {@link of}\n * @see {@link throwError}\n */\nexport const EMPTY = new Observable((subscriber) => subscriber.complete());\n\n/**\n * @param scheduler A {@link SchedulerLike} to use for scheduling\n * the emission of the complete notification.\n * @deprecated Replaced with the {@link EMPTY} constant or {@link scheduled} (e.g. `scheduled([], scheduler)`). Will be removed in v8.\n */\nexport function empty(scheduler?: SchedulerLike) {\n return scheduler ? emptyScheduled(scheduler) : EMPTY;\n}\n\nfunction emptyScheduled(scheduler: SchedulerLike) {\n return new Observable((subscriber) => scheduler.schedule(() => subscriber.complete()));\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport function isScheduler(value: any): value is SchedulerLike {\n return value && isFunction(value.schedule);\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\nimport { isScheduler } from './isScheduler';\n\nfunction last(arr: T[]): T | undefined {\n return arr[arr.length - 1];\n}\n\nexport function popResultSelector(args: any[]): ((...args: unknown[]) => unknown) | undefined {\n return isFunction(last(args)) ? 
args.pop() : undefined;\n}\n\nexport function popScheduler(args: any[]): SchedulerLike | undefined {\n return isScheduler(last(args)) ? args.pop() : undefined;\n}\n\nexport function popNumber(args: any[], defaultValue: number): number {\n return typeof last(args) === 'number' ? args.pop()! : defaultValue;\n}\n", "export const isArrayLike = ((x: any): x is ArrayLike => x && typeof x.length === 'number' && typeof x !== 'function');", "import { isFunction } from \"./isFunction\";\n\n/**\n * Tests to see if the object is \"thennable\".\n * @param value the object to test\n */\nexport function isPromise(value: any): value is PromiseLike {\n return isFunction(value?.then);\n}\n", "import { InteropObservable } from '../types';\nimport { observable as Symbol_observable } from '../symbol/observable';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being Observable (but not necessary an Rx Observable) */\nexport function isInteropObservable(input: any): input is InteropObservable {\n return isFunction(input[Symbol_observable]);\n}\n", "import { isFunction } from './isFunction';\n\nexport function isAsyncIterable(obj: any): obj is AsyncIterable {\n return Symbol.asyncIterator && isFunction(obj?.[Symbol.asyncIterator]);\n}\n", "/**\n * Creates the TypeError to throw if an invalid object is passed to `from` or `scheduled`.\n * @param input The object that was passed.\n */\nexport function createInvalidObservableTypeError(input: any) {\n // TODO: We should create error codes that can be looked up, so this can be less verbose.\n return new TypeError(\n `You provided ${\n input !== null && typeof input === 'object' ? 'an invalid object' : `'${input}'`\n } where a stream was expected. You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.`\n );\n}\n", "export function getSymbolIterator(): symbol {\n if (typeof Symbol !== 'function' || !Symbol.iterator) {\n return '@@iterator' as any;\n }\n\n return Symbol.iterator;\n}\n\nexport const iterator = getSymbolIterator();\n", "import { iterator as Symbol_iterator } from '../symbol/iterator';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being an Iterable */\nexport function isIterable(input: any): input is Iterable {\n return isFunction(input?.[Symbol_iterator]);\n}\n", "import { ReadableStreamLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport async function* readableStreamLikeToAsyncGenerator(readableStream: ReadableStreamLike): AsyncGenerator {\n const reader = readableStream.getReader();\n try {\n while (true) {\n const { value, done } = await reader.read();\n if (done) {\n return;\n }\n yield value!;\n }\n } finally {\n reader.releaseLock();\n }\n}\n\nexport function isReadableStreamLike(obj: any): obj is ReadableStreamLike {\n // We don't want to use instanceof checks because they would return\n // false for instances from another Realm, like an