diff --git a/src/main/java/me/ezzedine/mohammed/personalspace/article/infra/ArticleEntity.java b/src/main/java/me/ezzedine/mohammed/personalspace/article/infra/ArticleEntity.java index 80c39b2..3c794c0 100644 --- a/src/main/java/me/ezzedine/mohammed/personalspace/article/infra/ArticleEntity.java +++ b/src/main/java/me/ezzedine/mohammed/personalspace/article/infra/ArticleEntity.java @@ -4,6 +4,8 @@ import lombok.Builder; import lombok.Data; import lombok.NoArgsConstructor; +import org.springframework.data.annotation.CreatedDate; +import org.springframework.data.annotation.LastModifiedDate; import org.springframework.data.annotation.Version; import org.springframework.data.mongodb.core.mapping.Document; @@ -25,9 +27,9 @@ public class ArticleEntity { private List keywords; private boolean hidden; private String estimatedReadingTime; -// @CreatedDate + @CreatedDate private LocalDateTime createdDate; -// @LastModifiedDate + @LastModifiedDate private LocalDateTime lastModifiedDate; @Version private Long version; diff --git a/src/main/java/me/ezzedine/mohammed/personalspace/legacy/DataBackwardsCompatibilityEnabler.java b/src/main/java/me/ezzedine/mohammed/personalspace/legacy/DataBackwardsCompatibilityEnabler.java deleted file mode 100644 index 6cfc95a..0000000 --- a/src/main/java/me/ezzedine/mohammed/personalspace/legacy/DataBackwardsCompatibilityEnabler.java +++ /dev/null @@ -1,55 +0,0 @@ -package me.ezzedine.mohammed.personalspace.legacy; - -import com.fasterxml.jackson.databind.ObjectMapper; -import lombok.RequiredArgsConstructor; -import lombok.extern.slf4j.Slf4j; -import me.ezzedine.mohammed.personalspace.article.infra.ArticleEntity; -import me.ezzedine.mohammed.personalspace.article.infra.ArticleMongoRepository; -import me.ezzedine.mohammed.personalspace.article.infra.highlight.HighlightedArticleMongoRepository; -import org.springframework.boot.CommandLineRunner; -import org.springframework.stereotype.Component; - -import java.io.IOException; -import java.time.LocalDateTime; -import java.util.Arrays; -import java.util.List; - -@Slf4j -@Component -@RequiredArgsConstructor -public class DataBackwardsCompatibilityEnabler implements CommandLineRunner { - - private final ArticleMongoRepository articleMongoRepository; - private final HighlightedArticleMongoRepository highlightedArticleMongoRepository; - - @Override - public void run(String... 
args) throws IOException { - highlightedArticleMongoRepository.deleteAll(); - articleMongoRepository.deleteAll(); - - byte[] bytes = DataBackwardsCompatibilityEnabler.class.getClassLoader().getResourceAsStream("migrated-data.json").readAllBytes(); - LegacyArticleDocument[] legacyDocuments = new ObjectMapper().readValue(bytes, LegacyArticleDocument[].class); - - List entities = Arrays.stream(legacyDocuments).map(DataBackwardsCompatibilityEnabler::map).toList(); - - articleMongoRepository.saveAll(entities); - } - - private static ArticleEntity map(LegacyArticleDocument e) { - return ArticleEntity.builder() - .id(e.getId()) - .title(e.getTitle()) - .description(e.getDescription()) - .categoryId(e.getCategoryId()) - .thumbnailImageUrl(e.getThumbnailImageUrl()) - .createdDate(getDate(e.getCreatedDate())) - .lastModifiedDate(getDate(e.getLastModifiedDate())) - .estimatedReadingTime(e.getEstimatedReadingTime()) - .content(e.getContent()) - .build(); - } - - private static LocalDateTime getDate(String date) { - return LocalDateTime.parse(date); - } -} diff --git a/src/main/java/me/ezzedine/mohammed/personalspace/legacy/LegacyArticleDocument.java b/src/main/java/me/ezzedine/mohammed/personalspace/legacy/LegacyArticleDocument.java deleted file mode 100644 index fbc9ccc..0000000 --- a/src/main/java/me/ezzedine/mohammed/personalspace/legacy/LegacyArticleDocument.java +++ /dev/null @@ -1,25 +0,0 @@ -package me.ezzedine.mohammed.personalspace.legacy; - -import lombok.AllArgsConstructor; -import lombok.Builder; -import lombok.Data; -import lombok.NoArgsConstructor; - -import java.util.List; - -@Data -@Builder -@NoArgsConstructor -@AllArgsConstructor -public class LegacyArticleDocument { - private String id; - private String title; - private String description; - private String estimatedReadingTime; - private String categoryId; - private String thumbnailImageUrl; - private String content; - private String createdDate; - private String lastModifiedDate; - private List keywords; -} diff --git a/src/main/resources/migrated-data.json b/src/main/resources/migrated-data.json deleted file mode 100644 index 98b08e6..0000000 --- a/src/main/resources/migrated-data.json +++ /dev/null @@ -1 +0,0 @@ -[{"id":"vpVOxDAmqn0","title":"SSL Certificate Auto-Renewal","description":"In this article we will go over a simple procedure to automatically renew the SSL certificate generated by Let's Encrypt","estimatedReadingTime":"2 min read","categoryId":"devops","thumbnailImageUrl":"https://stuggi.files.wordpress.com/2019/08/le-logo-twitter.png","content":"

In a previous article, I went over the steps to generate an SSL certificate using Let's Encrypt's certbot run in a docker container.

Problem

Let's Encrypt is a non-profit open certificate authority, and it generates certificates valid for 90 days only. So, you're required to renew your certificates every 3 months, which sounds like a lot of manual work to do ;)

Cron Jobs

In Linux systems, a cron job is a task that executes on a defined schedule. It is easy to set up and use. Its syntax looks a bit strange at first, so we're not going to dig deep into it, but you can check out some online references for more details about it.

To address our problem, we're going to set up a cron job that runs on the 1st and 15th of every other month, at 00:00 and 12:00. It sounds like a lot of unnecessary executions, but considering that Let's Encrypt's certbot only lets you renew your certificates within 30 days of their expiry dates, we're trying to keep some safety margin here.

In your Linux machine, run the following command to open the list of cron jobs that you have defined:

crontab -e

Add a new line at the end of the file and add the following task:

0 0,12 1,15 */2 * docker run --rm -p 80:80 -p 443:443 -v /root/nginx/letsencrypt:/etc/letsencrypt certbot/certbot certonly -d {your_domain} --standalone -m {your_email} --agree-tos

The first part of the command, (0 0,12 1,15 */2 *), is the cron schedule we mentioned earlier: minute 0, at hours 0 and 12, on the 1st and 15th of the month, every second month, on any day of the week. You can use an online cron expression editor to help you read and write such syntax.

The rest of the command runs the certbot in a docker container to generate the new certificate.

Once you finish with the file, you can save it and close it. Now your cron job will run at the specified times :) 

","createdDate":"2023-08-27T13:31:19.765","lastModifiedDate":"2023-08-27T13:32:02.026","keywords":["SSL","Let's Encrypt","Auto-Renewal"]},{"id":"G2LNd7Rm65r","title":"Consumer-Driven Contract Tests with PACT","description":"Contract Tests can be considered a major block in the development lifecycle when working on a distributed system architecture. Such type of testing can be driven by either the consumer or the provider. Today, we're going to discover the consumer-driven contract testing using PACT.","estimatedReadingTime":"10 min read","categoryId":"software-development","thumbnailImageUrl":"https://pact.io/assets/img/logo-black.png","content":"

Why Contract Testing?

In a previous article, we went over the need for testing the contract of communications between services when working in a distributed systems architecture. Today, we are going to dig deep into the practicality of putting this concept into action using PACT.

Why PACT?

After doing some research on what's available in the market, PACT stood out as one of the best options since it provides a well-structured process for defining, managing, and validating contract tests. It also provides a broker to store and version the pacts, which can be a powerful tool to monitor the state of your services' communication and even prevent the release of code that breaks the stated contracts.

Also, PACT works with a variety of programming languages and supports most of the famous frameworks, such as Spring Boot and .NET.

Ingredients... 🧑‍🍳

First, we will need some starter code. For the sake of this demo, I am going to create two projects: a provider and a consumer. Our domain will be \"spells from Harry Potter\" (yes, I am a big fan). So the provider acts like a server where we can fetch the list of available spells and add to them, while the consumer is a simple console application that uses these APIs. You can find the code for this stage of the demo here.

Time to Get Down To Business

Now that we have our starter code ready, it's time to start writing some contracts.

Setting Up The Needed Infrastructure

First, we need to set up the PACT Broker container, using the following configuration (which can be added to the existing docker-compose file):

version: \"3\"\nservices:\n  postgres:\n    image: postgres\n    healthcheck:\n      test: psql postgres --command \"select 1\" -U postgres\n    volumes:\n      - postgres-volume:/var/lib/postgresql/data\n    environment:\n      POSTGRES_USER: postgres\n      POSTGRES_PASSWORD: password\n      POSTGRES_DB: postgres\n\n  pact-broker:\n    image: pactfoundation/pact-broker:2.107.0.1\n    ports:\n      - \"9292:9292\"\n    depends_on:\n      - postgres\n    environment:\n      PACT_BROKER_PORT: '9292'\n      PACT_BROKER_DATABASE_URL: \"postgres://postgres:password@postgres/postgres\"\n      PACT_BROKER_LOG_LEVEL: INFO\n      PACT_BROKER_SQL_LOG_LEVEL: DEBUG\n      PACT_BROKER_DATABASE_CONNECT_MAX_RETRIES: \"15\"\n\nvolumes:\n  postgres-volume:

Now if we spin up the above containers by running the command

docker-compose up -d 

and navigate to http://localhost:9292/ we will see the following screen:

HTTP Contracts

We will first begin with synchronous request/response contracts.

Consumer's Side:

From the consumer's side, we first need to add the following dependency:

testImplementation(\"au.com.dius.pact.consumer:junit5:4.5.5\")

We will also need two Gradle plugins:

  1. PACT plugin: to provide us with the PACT gradle tasks
  2. a plugin to provide us with Git information about the project (needed for versioning the contracts):
plugins {\n    id(\"au.com.dius.pact\") version \"4.5.5\"\n    id(\"org.ajoberstar.grgit\") version \"4.1.1\"\n}\n\npact {\n    publish {\n        pactBrokerUrl = \"http://localhost:9292/\" // our pact broker endpoint\n        version = grgit.head().abbreviatedId  // the commit id will represent the contract version\n        consumerBranch = grgit.branch.current().getName() // we can tag the contracts with the branch name\n    }\n}

Now that we have our dependencies set up, we can proceed with writing our contract tests. We will create a test class \"SpellRestClientConsumerContractTest\" in which we will add all our consumer REST contracts. For the sake of simplicity, we will only show one of the contracts here; you can check out the rest of them in the repository.

@ExtendWith(PactConsumerTestExt.class)\nclass SpellRestClientConsumerContractTest {\n\n    public static final String CONSUMER = \"spell-client\";\n    public static final String PROVIDER = \"spell-server\";\n\n    @Pact(consumer = CONSUMER, provider = PROVIDER)\n    @SuppressWarnings(\"unused\")\n    V4Pact getAllSpellsContract(PactDslWithProvider builder) {\n        return builder\n                .given(\"spells exist\")\n                .uponReceiving(\"a request to fetch all spells\")\n                    .path(\"/spells\")\n                    .method(\"GET\")\n                .willRespondWith()\n                    .status(200)\n                    .body(PactDslJsonArray.arrayEachLike()\n                            .stringType(\"name\")\n                            .stringType(\"description\")\n                    )\n                .toPact(V4Pact.class);\n    }\n\n    @Test\n    @DisplayName(\"validate contract for getting all spells\")\n    @PactTestFor(pactMethod = \"getAllSpellsContract\")\n    void validate_contract_for_getting_all_spells(MockServer mockServer) {\n        RestTemplate restTemplate = new RestTemplateBuilder().rootUri(mockServer.getUrl()).build();\n        SpellClient spellRestClient = new SpellClient(restTemplate);\n        List<Spell> spells = spellRestClient.getSpells();\n        assertNotEquals(0, spells.size());\n    }\n}

You can see that in this example, we are defining a contract for fetching all the existing magic spells under the path \"/spells\". We also assumed that some spells do exist. Then, the contract states that the endpoint should return a list of JSON objects, each containing two fields: \"name\" and \"description\". We then have a test to validate the contract and generate the needed file.

How PACT works here:

Adding the contract is not enough for the process. We also need to add a test with annotation @PactTestFor(pactMethod = \"getAllSpellsContract\") in order to tell PACT that this test is to validate the mentioned contract and generate its file.

Running the test method would generate a file named \"spell-client-spell-server.json\" under the directory \"build/pacts\". This file is PACT's JSON representation of the defined contract.

Publishing to the Broker

Once we run our consumer contract tests and have the contract files autogenerated for us, we need to run the following command to publish them to the PACT broker:

gradlew client:pactPublish

Once the command ends, we can see that the contract is added to the broker:

We can see that the broker displays the contracts in a clear format that can act as versioned documentation.

Provider's Side:

Once the contracts are published to the broker, we can proceed from the Provider's side by downloading them and validating them against our application:

First, we need to add the following dependency:
 

testImplementation(\"au.com.dius.pact.provider:junit5spring:4.5.5\")

And, same as for the consumer, we need the PACT plugin alongside the Git information one:

plugins {\n    id(\"au.com.dius.pact\") version \"4.5.5\"\n    id(\"org.ajoberstar.grgit\") version \"4.1.1\"\n}

We also need to add some system properties for the validation procedure:

tasks.withType<Test>() {\n    systemProperty(\"pact.verifier.publishResults\", \"true\") // for PACT to publish the result of the contracts validation\n    systemProperty(\"pact.provider.branch\", grgit.branch.current().getName()) // to tag the provider's validation with the current branch\n    systemProperty(\"pact.provider.version\",  grgit.head().abbreviatedId) // to tag the provider's validation with the current commit id as a version\n}

Also, we need to specify the pact broker endpoint in the application.yml:

pactbroker:\n  url: http://localhost:9292

Now that we have everything set up, we can proceed with writing the provider's test class to validate the contracts. The trick with PACT is that, as a provider, we don't need to do the actual validation. All we have to do is prepare the grounds for PACT to run a simulation and validate the contracts. To do so, we will need to:

  1. Run the application to allow PACT to simulate the communication from the consumer
  2. Provide the needed state mocking to simulate all the scenarios
@SpringBootTest(\n        webEnvironment = SpringBootTest.WebEnvironment.RANDOM_PORT,\n        classes = {\n                SpellController.class,\n                SpellService.class\n        }\n)\n@Provider(\"spell-server\")\n@PactBroker\n@EnableAutoConfiguration\nclass SpellServerProviderContractTest {\n\n    @LocalServerPort\n    private int port;\n\n    @MockBean\n    private SpellFetcher spellFetcher;\n\n    @MockBean\n    private SpellPersister spellPersister;\n\n    @TestTemplate\n    @ExtendWith(PactVerificationSpringProvider.class)\n    void pactVerificationTestTemplate(PactVerificationContext context) {\n        context.verifyInteraction();\n    }\n\n    @BeforeEach\n    void setUp(PactVerificationContext context, Pact pact) {\n        if (pact.isRequestResponsePact()) {\n            context.setTarget(new HttpTestTarget(\"localhost\", port));\n        }\n    }\n\n    @State(\"spells exist\")\n    @SuppressWarnings(\"unused\")\n    void toSpellsExistState() {\n        when(spellFetcher.getAll()).thenReturn(List.of(\n                Spell.builder().name(\"hokus pokus\").description(\"does anything\").build(),\n                Spell.builder().name(\"Lumos maxima\").description(\"An improved version of the lumos spell\").build()\n        ));\n    }\n\n}

You may have noticed that when we defined the contract from the Consumer's side, we added an assumption that \"spells exist\". To achieve this state, PACT provides us with what they call \"state changers\", where we have a method annotated with @State(\"spells exist\") and inside it, we can mock our internal services.

Once we run this test class, PACT will pull the contracts from the broker for the provider \"spell-server\", validate them, and publish the validation result back to the broker. When this is done, we can see that the result is reflected in the broker:

Messaging Contracts

Messaging contracts are a bit trickier, since the server can become the consumer for the first command and then the provider if it sends an event back, and the same applies to the client. For the sake of simplicity, we will assume that the client will send an event to the server that a new spell is requested, which the server will handle without sending anything back. In this scenario, the client becomes the provider since it sends the event, and the server becomes the consumer (it's confusing, I know).
 

 

Consumer's Side:

Same as before, we would need to add the required dependencies and configurations if not yet added. I will then jump to the actual code:

@ExtendWith(PactConsumerTestExt.class)\n@PactTestFor(providerName = \"spell-client\", providerType = ProviderType.ASYNCH, pactVersion = PactSpecVersion.V4)\nclass AddingNewSpellAsyncConsumerContractTest {\n\n    @Pact(consumer = \"spell-server\")\n    @SuppressWarnings(\"unused\")\n    V4Pact newSpellRequestEventContract(MessagePactBuilder builder) {\n        return builder\n                .expectsToReceive(\"a new spell requested event\")\n                .withMetadata(Map.of(\n                        \"routing-key\", \"spells.add\",\n                        \"exchange\", \"spells\"\n                ))\n                .withContent(new PactDslJsonBody()\n                        .stringType(\"name\")\n                        .stringType(\"description\")\n                )\n                .toPact(V4Pact.class);\n    }\n\n    @Test\n    @DisplayName(\"validate contract for new spell requested event\")\n    @PactTestFor(pactMethod = \"newSpellRequestEventContract\")\n    void validate_contract_for_new_spell_requested_event(V4Interaction.AsynchronousMessage message) throws IOException {\n        NewSpellRequestedEvent event = new ObjectMapper().readValue(message.getContents().getContents().getValue(), NewSpellRequestedEvent.class);\n        assertNotNull(event);\n    }\n\n}

This code is on the server's side (the consumer in this case), where it expects an event to be received on the exchange \"spells\" with the routing key \"spells.add\". The event body will contain two fields: \"name\" and \"description\". Of course, we have a test method to validate this contract and generate the JSON file for it.

Once run, we can execute the following command to publish the new contract to the broker:

gradlew server:pactPublish

We can see that a new contract was added:

Unfortunately, the PACT broker does not yet support clear visualization for message contracts :/

Provider's Side:

As we did previously, we need to add a new test class in the provider (in this case the client) to validate the published contracts:

\n@SpringBootTest(\n        webEnvironment = SpringBootTest.WebEnvironment.NONE,\n        classes = RabbitProperties.class\n)\n@Provider(\"spell-client\") // for PACT to know to fetch the contracts for provider \"spell-client\"\n@PactBroker\nclass SpellClientProviderContractTest {\n\n    @Autowired\n    private RabbitProperties rabbitProperties;\n\n    @TestTemplate\n    @ExtendWith(PactVerificationSpringProvider.class)\n    void pactVerificationTestTemplate(PactVerificationContext context) {\n        context.verifyInteraction();\n    }\n\n    @BeforeEach\n    void setUp(PactVerificationContext context, Pact pact) {\n        if (!pact.isRequestResponsePact()) {\n            context.setTarget(new MessageTestTarget());\n        }\n    }\n\n    @PactVerifyProvider(\"a new spell requested event\")\n    @SuppressWarnings(\"unused\")\n    MessageAndMetadata provideNewSpellRequestEvent() throws JsonProcessingException {\n        NewSpellRequestedEvent event = NewSpellRequestedEvent.builder().name(UUID.randomUUID().toString()).description(UUID.randomUUID().toString()).build();\n        byte[] eventBytes = new ObjectMapper().writeValueAsBytes(event);\n        Map<String, String> metadata = Map.of(\n                \"routing-key\", rabbitProperties.getAddSpellRoutingKey(),\n                \"exchange\", rabbitProperties.getAddSpellExchange()\n        );\n        return new MessageAndMetadata(eventBytes, metadata);\n    }\n}\n

In the previous scenario, when we were validating REST contracts, we were running the controllers and expecting PACT to run a simulation of the communication. The case here is different. We only need to provide PACT with a sample of the event that would be sent in the real scenario, and PACT will do the validation from there onwards. This is done by having a method annotated with @PactVerifyProvider(\"a new spell requested event\"), where \"a new spell requested event\" is what the contract mentions as the type of event expected to be sent.

Same as before, running the test class will pull the contracts from the broker, validate them, and publish the result back to the broker.

The full code can be found on this GitHub repository.

","createdDate":"2023-07-15T17:30:11.658","lastModifiedDate":"2023-09-01T09:58:53.167","keywords":["Development Lifecycle","Contract-Testing","Consumer-Driven Contract-Testing","PACT","Spring Boot","Java"]},{"id":"ojBRmoPdYq2","title":"Contract Testing: Yay or Nay?","description":"Tests are an essential part of the development lifecycle. They ensure good code quality and meet the business value behind the unit under test. Since different types of tests target different objectives, there comes a need for a fast \"offline\" test that targets the communication principles amongst different parties, hence contract tests.","estimatedReadingTime":"4 min read","categoryId":"software-development","thumbnailImageUrl":"https://emlaw.co.uk/wp-content/uploads/2021/03/Incorporated-By-Reference.jpg","content":"

Motivation

While working with a distributed systems architecture gives you a significant advantage regarding scalability and decoupling unrelated domains, it has some drawbacks. It is true that the systems are independent of each other. Yet, in most cases, some consume others, and thus have a hidden dependency on them. This dependency is summarized in the three bases of communication: protocol, structure, and value. These three bases make up the communication agreement, or the \"contract\".

Scope of Contract Testing

Contract testing can be applied to any sort of communication that happens between two services. It's most commonly applied to REST APIs, but it can also be used for messaging, calls over gRPC, etc.

Why Contract Tests?

One can argue that the value of validating contracts can be achieved with end-to-end tests or edge-level integration tests, where you spin up a container of the provider whose contract you're validating against. However, this approach is usually costly in time and storage resources, and it tends to slow down your development lifecycle if you run these tests before pushing to the main branch. Thus the need for an efficient type of testing where we are only concerned with simulating the communication (be it request/response, events, etc.) and validating its main principles.

Types of Contract Tests

Provider-Driven:

The first approach to contract testing is when it's initiated by the provider (a.k.a. the server). In this context, the provider writes the contracts that describe its APIs, then publishes them to a remote shared server.

On the other hand, the client downloads the contracts of the provider they're interested in and runs tests against them to validate that they're getting back the response they expect.
 

Provider-driven contract testing is featured in Spring Cloud Contract, where the contracts are written either as YAML or Groovy files, packaged as a stub JAR, and published to an artifact manager.
 

Consumer-Driven:

The second approach for contract testing is when the consumer (a.k.a. the client) is the one to initiate the process, where each consumer writes the contract that they expect the provider to fulfill and publishes these contracts to a remote shared server.
 

On the other hand, the provider has to download all the contracts assigned to them and do the needed development to fulfill their requirements.
 

To follow consumer-driven contract testing, PACT offers a nice solution where the contracts are written in a DSL and are published as \"pacts\" to a broker (pact-broker). It provides a nice way of versioning contracts and documenting them.
 

Provider-Driven vs. Consumer-Driven Contract Testing:

Provider-Driven Contract Testing

Pros:

  • the provider does not have to be aware of the consumers (recommended when working on a public API; e.g. Azure APIs)

Cons:

  • it is not preventative: we cannot prevent the provider from changing the contract in a way that breaks the consumer
  • the consumer is not aware of the change of contracts before it happens

Consumer-Driven Contract Testing

Pros:

  • it is preventative: any change suggested by the provider cannot be merged if it breaks an existing contract

Cons:

  • gives more power to the consumer: the provider will stay blocked until the contracts of all of its consumers are fulfilled
  • contracts coming from different consumers can conflict, which results in a deadlock

Note: this is mostly recommended when working on internal APIs, where the provider and the consumer are aware of each other and thus have the privilege of communicating while writing the contracts.

","createdDate":"2023-07-12T09:54:52.639","lastModifiedDate":"2023-07-12T10:00:42.007","keywords":["Development Lifecycle","Contract-Testing"]},{"id":"X402gZvgPGv","title":"Pattern Matching Using RegEx","description":"Regular Expressions are a very powerful tool. When done right, they can be used to extract information from texts in a clean way. In this article, we will see an example done in Java to extract information from an article's URL.","estimatedReadingTime":"less than a minute read","categoryId":"software-development","thumbnailImageUrl":null,"content":"

Motivation

Imagine I have the URL \"https://mohammed.ezzedine.me/article/X402gZvgPGv\" from which I want to extract some information, such as the host, domain name, article ID, etc.
This can be done in code by splitting on the special characters, but that would be nasty, hard to read, and hard to maintain. That's why we will be using pattern matching with RegEx to do the job.

Java Pattern Matching

For this, we will be using the concept of Named Capturing Groups:

import java.util.regex.Matcher;\nimport java.util.regex.Pattern;\n\nclass Scratch {\n\n    private static final String URL_REGEX = \"(?<protocol>.*?)://(?<host>(?<subdomain>.*?)\\\\.(?<domain>.*?)\\\\.(?<domainExtension>.*?))/(?<section>.*?)/(?<sectionId>.*?)\";\n\n    public static void main(String[] args) {\n        String url = \"https://mohammed.ezzedine.me/article/X402gZvgPGv\";\n\n        Pattern pattern = Pattern.compile(URL_REGEX);\n        Matcher matcher = pattern.matcher(url);\n\n        if (matcher.matches() && matcher.groupCount() > 0) {\n            System.out.printf(\"Protocol: `%s`%n\", matcher.group(\"protocol\"));\n            System.out.printf(\"Host: `%s`%n\", matcher.group(\"host\"));\n            System.out.printf(\"Sub-domain: `%s`%n\", matcher.group(\"subdomain\"));\n            System.out.printf(\"Domain Name: `%s`%n\", matcher.group(\"domain\"));\n            System.out.printf(\"Domain Name Extension: `%s`%n\", matcher.group(\"domainExtension\"));\n            System.out.printf(\"Section: `%s`%n\", matcher.group(\"section\"));\n            System.out.printf(\"Section ID: `%s`%n\", matcher.group(\"sectionId\"));\n        }\n    }\n}

The output:


 

Protocol: `https`

Host: `mohammed.ezzedine.me`

Sub-domain: `mohammed`

Domain Name: `ezzedine`

Domain Name Extension: `me`

Section: `article`

Section ID: `X402gZvgPGv`

Note here that:

  • the syntax for a group is simple: (?<groupName>{regexMatchingGroupContent})
  • the group content can contain the syntax for a nested group
","createdDate":"2022-07-06T18:41:14.138","lastModifiedDate":"2022-07-06T18:46:00.666","keywords":["Pattern Matching","Java","RegEx"]},{"id":"LokwxLXxG8D","title":"Storing Dynamic Objects in MongoDB with C#","description":"The main purpose of using a NoSQL DB vendor such as MongoDB is to take advantage from the flexibility it provides at the level of the structure of the stored data. However, when used with C#, storing a field whose structure is dynamic and differs from object to another is a bit difficult.","estimatedReadingTime":"2 min read","categoryId":"software-development","thumbnailImageUrl":null,"content":"

Motivation

For the sake of this article, let's assume we have a class Vehicle that can be a car, a motorcycle, etc. We want to store our data in the database in a structure similar to this:

{\n  \"vehicles\": [\n    { \n      \"type\": \"car\", \n      \"model\": \"Tesla\", \n      \"color\":  \"white\"\n    },\n    { \n      \"type\": \"motorcycle\", \n      \"brand\": \"Yamaha\"\n    },\n    { \"type\": \"plane\", \n      \"numberOfSeats\": 100,\n      \"previousFlights\": [\n        { \"date\": \"03-11-2021\", \"from\": \"Beirut\", \"to\": \"Istanbul\" },\n        { \"date\": \"21-12-2021\", \"from\": \"Paris\", \"to\": \"Beirut\" }\n      ]\n    },\n    { \n      \"type\": \"car\", \n      \"model\": \"Mercedes\", \n      \"year\":  \"2022\"\n    }\n  ]\n}

We can, of course, create a class for each vehicle covering all the possible fields. However, this approach is not recommended when the data is not limited to a specific structure, because any new model would require new development to be done. Instead, we can treat a vehicle as a JSON object and store it as is in the database taking advantage of the power NoSQL databases provide us with.

Prerequisite

In this example, I will be using the official MongoDB Driver for C#.

Implementation

To achieve our goal, we will create two representations of the vehicle class: one to serve as a database entity, and another as an API data transfer object.

VehicleEntity

In the database entity, we will represent the content of the vehicle as a BsonArray:

using MongoDB.Bson;\nusing MongoDB.Bson.Serialization.Attributes;\n\nnamespace Mohammed.Ezzedine.MongoExample.Entities;\n\npublic class VehicleEntity\n{\n    [BsonId]\n    [BsonRepresentation(BsonType.Int32)]\n    public int Id { get; set; }\n\n    public BsonArray Content { get; set; }\n}

VehicleDto

For the API data transfer object, and since BsonArray is a representation scoped for MongoDB, we will use JsonNode. This way, we are not coupling our API behavior to the DB vendor we are using.

using System.Text.Json.Nodes;\n\nnamespace Mohammed.Ezzedine.MongoExample.Dtos;\n\npublic class VehicleDto\n{\n    public int Id { get; set; }\n\n    public JsonNode Content { get; set; }\n}

Mapping

When our API receives a request to store a new vehicle in the database, the content will be in the form of a JsonNode; however, we want it to be a BsonArray, and this conversion is not done implicitly. To solve this issue, we can convert back and forth between the two types as follows:

using System.Text.Json.Nodes;\nusing MongoDB.Bson;\nusing MongoDB.Bson.Serialization;\nusing Mohammed.Ezzedine.MongoExample.Dtos;\nusing Mohammed.Ezzedine.MongoExample.Entities;\n\npublic class VehicleDataMapping\n{\n    public static VehicleEntity map(VehicleDto dto)\n    {\n        return new VehicleEntity\n        {\n            Id = dto.Id,\n            Content = BsonSerializer.Deserialize<BsonArray>(dto.Content.ToString(), null)\n        };\n    }\n\n    public static VehicleDto map(VehicleEntity entity)\n    {\n        return new VehicleDto\n        {\n            Id = entity.Id,\n            Content = JsonArray.Parse(entity.Content.ToString(), null, default)\n        };\n    }\n}
","createdDate":"2022-05-29T17:33:06.951","lastModifiedDate":"2022-05-29T17:36:48.865","keywords":[".NET","MongoDB","NoSQL","C#"]},{"id":"3NDMxqyxP68","title":"How Dependency Injection Makes You Write Better Unit Tests","description":"The purpose of writing unit tests is to test a unit in isolation. However, sometimes isolation may not be accessible when the unit we are testing logically depends on other external units. Here comes Dependency Injection (DI) alongside other concepts to serve the mentioned purpose.","estimatedReadingTime":"4 min read","categoryId":"software-development","thumbnailImageUrl":null,"content":"

Motivation

Let's start with the following example:

package me.ezzedine.mohammed.di;\n\nimport me.ezzedine.mohammed.di.Repository;\nimport me.ezzedine.mohammed.di.LibraryService;\nimport lombok.NonNull;\n\npublic class Library {\n    private Repository<Book> bookRepository;\n    private Repository<Client> clientRepository;\n    private LibraryService libraryService;\n\n    public Library() {\n        bookRepository = new BookRepository();\n        clientRepository = new ClientRepository();\n        libraryService = new LibraryService();\n    }\n\n    public void borrowBook(@NonNull String bookIsbn, @NonNull String clientId) throws BookNotFoundException, ClientNotFoundException {\n        Book book = bookRepository\n                .get(bookIsbn)\n                .orElseThrow(() -> new BookNotFoundException(bookIsbn));\n\n        Client client = clientRepository\n                .get(clientId)\n                .orElseThrow(() -> new ClientNotFoundException(clientId));\n\n        if (libraryService.isBookAvailable(book) && libraryService.isClientEligibleToBorrow(client)) {\n            libraryService.clientBorrowBook(clientId, bookIsbn);\n        }\n    }\n}

In the above code, we have a class representing a library. This class references other helper classes: Repository<Book> and Repository<Client> for data access, and LibraryService for the application logic of a library. For the sake of this example, we limited the functionality of the class to a single method for borrowing a book. The logic of the method is straightforward: get the book and the client, validate them, and delegate the call to the application layer. However, testing it in isolation from the logic of its dependencies is not possible, since we would then be testing all four classes together; hence, it's not a unit test anymore.

Concept

The concept of dependency injection is to avoid creating instances of the needed classes from inside the class that requires them. Instead, we require them in the constructor arguments and move the complexity one step higher. That can be done easily as follows:

package me.ezzedine.mohammed.di;\n\nimport me.ezzedine.mohammed.di.Repository;\nimport me.ezzedine.mohammed.di.LibraryService;\nimport lombok.NonNull;\n\npublic class Library {\n    private Repository<Book> bookRepository;\n    private Repository<Client> clientRepository;\n    private LibraryService libraryService;\n\n    public Library(Repository<Book> bookRepository, Repository<Client> clientRepository, LibraryService libraryService) {\n        this.bookRepository = bookRepository;\n        this.clientRepository = clientRepository;\n        this.libraryService = libraryService;\n    }\n\n    // rest of the class\n}

We could also inject the required instances in different ways, like using setters (as sketched below) or the famous builder pattern.
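
For illustration, here is a minimal sketch of the setter-injection variant, reusing the same Library, Repository, Book, Client, and LibraryService types from the example above (the constructor and business method are omitted for brevity):

public class Library {

    private Repository<Book> bookRepository;
    private Repository<Client> clientRepository;
    private LibraryService libraryService;

    // setter injection: the caller wires the dependencies after construction
    public void setBookRepository(Repository<Book> bookRepository) {
        this.bookRepository = bookRepository;
    }

    public void setClientRepository(Repository<Client> clientRepository) {
        this.clientRepository = clientRepository;
    }

    public void setLibraryService(LibraryService libraryService) {
        this.libraryService = libraryService;
    }

    // rest of the class
}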

Mocking

Dependency injection alone does not solve our original issue, which is testing in isolation. It only provides us with the ability to provide the required instances at runtime. In order to achieve the concept of isolation, we need to provide mock instances of the needed classes.

The idea of mocks is fairly simple: I want to provide an instance that is compatible with the required type at compile-time, but I also want to specify its behavior and the values it returns. This way, I can control the full scenario of execution when writing a unit test.

There are several libraries that help create mock instances of types. In Java, I prefer Mockito.

Writing Unit Tests With Mocks

The following example demonstrates the concept of writing unit tests with the help of dependency injection and mocks (using Mockito):

package me.ezzedine.mohammed.di;\n\nimport me.ezzedine.mohammed.di.Repository;\nimport me.ezzedine.mohammed.di.LibraryService;\nimport org.junit.jupiter.api.BeforeEach;\nimport org.mockito.Mockito;\n\nimport static org.junit.jupiter.api.Assertions.*;\n\nclass LibraryTest {\n\n    private Repository<Book> bookRepository;\n    private Repository<Client> clientRepository;\n    private LibraryService libraryService;\n    \n    private Library library;\n    \n    @BeforeEach\n    public void testSetUp() {\n        bookRepository = Mockito.mock(Repository.class);\n        clientRepository = Mockito.mock(Repository.class);\n        libraryService = Mockito.mock(LibraryService.class);\n        \n        library = new Library(bookRepository, clientRepository, libraryService);\n    }\n\n    // tests\n\n}

The first step is to create mock instances of each of the required services; then we pass them to the Library class that we are interested in testing.

Our next step is to start writing the tests:

package me.ezzedine.mohammed.di;\n\nimport me.ezzedine.mohammed.di.Repository;\nimport me.ezzedine.mohammed.di.LibraryService;\nimport org.junit.jupiter.api.BeforeEach;\nimport org.junit.jupiter.api.DisplayName;\nimport org.junit.jupiter.api.Test;\nimport org.mockito.Mockito;\n\nimport static org.junit.jupiter.api.Assertions.*;\n\nclass LibraryTest {\n\n    // ...\n\n    @Test\n    @DisplayName(\"Throws BookNotFoundException when book with the provided ISBN is not found.\")\n    public void throws_book_not_found_exception_when_book_does_not_exist() {\n        Mockito.when(bookRepository.get(Mockito.any())).thenThrow(BookNotFoundException.class);\n        assertThrows(BookNotFoundException.class, () -> library.borrowBook(\"ISBN\", \"clientId\"));\n    }\n}

In the above test, we are specifying the behavior of the book repository mock instance by explicitly telling it to throw an exception whenever the method get() is called. This way, we can test the behavior of the method Library.borrowBook in isolation in the scenario where the book is not found.

We can do more of this with the functionalities Mockito and similar libraries provide us with, for instance verifying interactions, as sketched below. All of this helps us achieve good-quality tests that run in isolation from external dependencies.
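
As a hedged illustration of those extra capabilities, here is a sketch of a happy-path test that stubs return values and uses Mockito.verify to check the delegation. It would live inside the same LibraryTest class, and it assumes the same Library, Book, Client, and LibraryService types from the example above, with the repositories returning Optionals as in the original Library code:

    @Test
    @DisplayName("Delegates the borrow call to the service when the book and client exist and are valid.")
    void delegates_to_service_when_book_and_client_are_valid() throws Exception {
        Book book = Mockito.mock(Book.class);
        Client client = Mockito.mock(Client.class);

        // stub the mocks so the happy path is taken
        Mockito.when(bookRepository.get("ISBN")).thenReturn(Optional.of(book));
        Mockito.when(clientRepository.get("clientId")).thenReturn(Optional.of(client));
        Mockito.when(libraryService.isBookAvailable(book)).thenReturn(true);
        Mockito.when(libraryService.isClientEligibleToBorrow(client)).thenReturn(true);

        library.borrowBook("ISBN", "clientId");

        // verify the interaction we actually care about happened
        Mockito.verify(libraryService).clientBorrowBook("clientId", "ISBN");
    }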

","createdDate":"2022-05-28T12:16:00.617","lastModifiedDate":"2022-05-28T12:23:33.390","keywords":["Dependency Injection","Unit Tests","Mockito","Mocks","Java"]},{"id":"MVR7xzpd4qr","title":"Your Way to Maven","description":"In this article, I'll be listing all the important notes and maven scripts that you'll probably need while developing in Java. It is more of an FAQ page.","estimatedReadingTime":"less than a minute read","categoryId":"software-development","thumbnailImageUrl":null,"content":"

How to compile a maven project?

mvn clean install -s settings.xml

How to compile in parallel threads?

mvn -T {number_of_threads} clean install

How to generate a runnable JAR?

Add this to your pom.xml file

<build>\n    <plugins>\n        <plugin>\n            <groupId>org.apache.maven.plugins</groupId>\n            <artifactId>maven-jar-plugin</artifactId>\n            <version>3.1.0</version>\n            <configuration>\n                <archive>\n                    <manifest>\n                        <addClasspath>true</addClasspath>\n                        <mainClass>{your_class_full_name}</mainClass>\n                    </manifest>\n                </archive>\n            </configuration>\n        </plugin>\n    </plugins>\n</build>

How to generate one JAR including all dependencies?

In the pom.xml file:

<build>\n    <plugins>\n        <plugin>\n            <artifactId>maven-assembly-plugin</artifactId>\n            <executions>\n                <execution>\n                    <id>make-assembly</id> \n                    <phase>package</phase>\n                    <goals>\n                        <goal>single</goal>\n                    </goals>\n                    <configuration>\n                        <archive>\n                            <manifest>\n                                <mainClass>{your_class_full_name}</mainClass>\n                            </manifest>\n                        </archive>\n                        <descriptorRefs>\n                            <descriptorRef>jar-with-dependencies</descriptorRef>\n                        </descriptorRefs>\n                        <finalName>{output_jar_name}</finalName>\n                        <appendAssemblyId>false</appendAssemblyId>\n                    </configuration>\n                </execution>\n            </executions>\n        </plugin>\n    </plugins>\n</build>

How to exclude tests from the compilation process?

mvn clean install -DskipTests -Dmaven.test.skip=true -Dmaven.test.skip.exec

How to run a main method from the command line?

mvn compile exec:java -D\"exec.mainClass\"=\"{your_class_full_name}\"
","createdDate":"2022-05-27T09:52:25.569","lastModifiedDate":"2022-09-21T12:52:59.632","keywords":["Maven","Java"]},{"id":"YwyWg2YdqlQ","title":"You Should Be Using Inversion of Control Already!","description":"Inversion of Control (IoC) is one of the hot topics when speaking of best practicies in programming. It helps decouple your applications from each other and reach a better separation of concerns.","estimatedReadingTime":"4 min read","categoryId":"software-development","thumbnailImageUrl":null,"content":"

Motivation

For the sake of this article, let's imagine a small scenario where we have a project named \"reporting\" that is responsible for generating reports for the user. This project uses a side library called \"printer\" that is responsible for printing the reports. The first approach to the solution architecture would be like this:

printer project:

contains the following class to handle the printing jobs:

package printer;\n\nimport document.Document;\n\npublic class Printer {\n    public void print(Document document) {\n\n    }\n}

reporting project:

contains the following class: 

package reporting;\n\nimport document.Document;\nimport printer.Printer;\n\npublic class ReportGenerator {\n\n    private final Printer printer;\n\n    public ReportGenerator() {\n        printer = new Printer();\n    }\n\n    public void generate() {\n        Document document;\n        // ...\n        printer.print(document);\n    }\n}\n

Note here that we are referencing the Printer and the Document classes, which live in the printer project, from the reporting project. So the compile-time dependency graph looks something like this:


 

Issue

The problem with the above graph is that any change in the printer project requires rebuilding both the printer and the reporting projects. That wouldn't be an issue if \"printer\" were the main project, but in this case, \"reporting\" is. Imagine having a bunch of other libraries used by reporting besides \"printer\": any change in any of them would require rebuilding the main project. Technically speaking, this makes our main project coupled to and dependent on the side helper projects, and this is the core problem. We would like to have the case reversed, where the libraries depend on the main project.

Concept

What Inversion of Control introduces is the following: let the main project define abstract classes or interfaces for all the external tools it needs at runtime, and whenever an instance of such a type is needed, fetch one from some sort of global pool. On the other hand, whoever is interested in providing one of the needed services has to implement this interface or abstract class and register an instance of that implementation in the previously mentioned global pool. This way, the external tools are the ones that depend on the main project at compile time, and not the other way around. Also, if all goes well, we can guarantee at runtime that an instance of the needed service will be available in the pool.

Implementation

In the reporting project:

  • We add an interface to the Document class:
package reporting;\n\npublic interface Report {\n}
  • We add an interface to the Printer class:
package utils;\n\nimport reporting.Report;\n\npublic interface PrinterUtils {\n    void print(Report report);\n}
  • And finally, modify the ReportGenerator class as follows:
package reporting;\n\nimport utils.PrinterUtils;\n\npublic class ReportGenerator {\n\n    private final PrinterUtils printer;\n\n    public ReportGenerator(PrinterUtils printer) {\n        this.printer = printer;\n    }\n\n    public void generate() {\n        Report report;\n        // ...\n        printer.print(report);\n    }\n}

And notice here that there is no need for the reporting project to depend on the printer project anymore.

In the printer project:

  • We make the class Document implement the interface Report provided by the reporting project:
package document;\n\nimport reporting.Report;\n\npublic class Document implements Report{\n}
  • We also make the Printer class implement the PrinterUtils interface provided by the reporting project:
package printer;\n\nimport reporting.Report;\nimport utils.PrinterUtils;\n\npublic class Printer implements PrinterUtils {\n    public void print(Report report) {\n\n    }\n}\n

Orchestrator

We also need an orchestrator project whose job is to start the reporting project, providing it with the needed implementations. It will therefore depend on both the reporting and the printer projects.

public class Main {\n    public static void main(String[] args) {\n        PrinterUtils printer = new Printer();\n        ReportGenerator reportGenerator = new ReportGenerator(printer);\n        reportGenerator.generate();\n    }\n}

After doing so, the compile time dependency graph would look like this

Outcome

After applying the Inversion of Control principle, our main project doesn't depend on external tools anymore. Hence, replacing the printer with another implementation of the same service requires only a minimal code change at the level of the solution architecture.

","createdDate":"2022-05-25T19:25:11.759","lastModifiedDate":"2022-05-27T09:52:46.196","keywords":["Inversion of Control","Dependency Injection","Java"]},{"id":"DnKAx89ml5q","title":"You Cannot SIGINT a Background Process","description":"There are many ways to kill a process in UNIX. There are flags to kill it gracefully, without curropting the resources it is accessing, and others to forcibly terminate it, especially used if the process is hanging. However, not all flags can be used with all processes.","estimatedReadingTime":"2 min read","categoryId":"devops","thumbnailImageUrl":null,"content":"

UNIX Kill Flags

To start, let's go over the flags that are available with UNIX's kill command. When you enter the following command in a UNIX shell, you get the following list of flags (signals) that can be sent to a process:

kill -l

The most relevant flags are the ones from 1 → 15. However, we won't go over them all. To summarize some:

SIGINT

Equivalent to pressing CTRL + C in a shell. It is used to terminate a process gracefully.

SIGQUIT

Equivalent to pressing CTRL + \ in a shell.

SIGKILL

Forcibly terminates the process without waiting for its resources to be freed or its child processes to be killed.

SIGTERM

Requests that the process terminate, giving it the chance to clean up and stop its child processes. Worth noting here: Java's UNIX implementation of java.lang.Process.destroy() uses this flag, as the sketch below illustrates.
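
As a small illustration (a hedged sketch, not from the original article), the following Java snippet starts a throwaway sleep process and terminates it with destroy(), which on UNIX translates to sending SIGTERM:

import java.util.concurrent.TimeUnit;

public class DestroyExample {
    public static void main(String[] args) throws Exception {
        // start a long-running child process (sleep is just a stand-in)
        Process process = new ProcessBuilder("sleep", "60").start();

        // on UNIX, Process.destroy() sends SIGTERM to the child
        process.destroy();

        // give the child a few seconds to terminate, then report
        boolean exited = process.waitFor(5, TimeUnit.SECONDS);
        System.out.println("exited: " + exited + ", still alive: " + process.isAlive());
    }
}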

How to Send a Flag to a Process

To send one of the previous flags to a process whose pid (process ID) you know, you can do one of the following:

kill -{number} pid    # e.g. kill -2 16342\nkill -SIG{signal} pid # e.g. kill -SIGINT 16342\nkill -{signal} pid    # e.g. kill -INT 16342

However, not all flags can be used with any process! For example, SIGINT, which is the equivalent of CTRL + C for graceful termination, cannot be used with background processes.

How to Know if a Process is a Background Process

When you list the processes in UNIX using the following command:

ps aux

You will get a list similar to this:

The 8th column, the one labeled STAT, gives us a summary of the status of the process. Foreground processes contain the plus \"+\" sign in their status, while background processes do not. For more information about what the characters in the process status mean, check this article.

How to Send SIGINT Flag to a Background Process?

Although it is hard to do so, you may still be able to send the SIGINT flag to a background process in one of the following ways:

1. If the process has a process group ID (pgid), you can send the flag directly to the group, and it gets delegated to all the processes in this group. You can do so as follows:

kill -SIGINT -{pgid} # e.g. kill -SIGINT -87341

2. You can move a background process to the foreground if you are in the same session the job was started from. To do so, you will need to use the fg UNIX command.

","createdDate":"2022-05-25T14:55:22.547","lastModifiedDate":"2022-05-25T14:56:17.509","keywords":["UNIX","SIGINT","Background Process"]},{"id":"eLo7dwyxVzP","title":"C# Features I'd Like to See in Java","description":"Java and C# look alike to some extent, however, in the recent few years, the two languages have diverged at different paces and directions. So, the old opinion about the difficulty to tell code from the two languages apart doesn't hold anymore, or at least to me. C# has been growing and shaping up at a faster and more focused pace, and in this article, I'll go over C# features that Java would be better by having.","estimatedReadingTime":"7 min read","categoryId":"software-development","thumbnailImageUrl":"https://cdn.hdwebsoft.com/wp-content/uploads/2021/07/java-vs-c-sharp-dot-net.png","content":"

I'll go over the features one by one, stating the C# version in which each was introduced. Note that the current version of C# as of writing this article is C# 10.0, with a preview of C# 11.0, as a new version has been released every year since C# 7.0 (2017). On the other hand, Java's latest release is Java 17, with two releases per year for almost 3 years now.

1. Object Initializer (C# 3.0)

This feature gives you the freedom of initializing the value of an object's fields upon instantiation without having to pass them to the constructor. You are free to initialize the fields you want and leave out the unnecessary ones, and the best part is that you can still pass parameters to the constructor at the same time. Without further ado, let's jump to the code:

public class Employee {\n    private const int DEFAULT_SALARY = 1000;\n    \n    public string Name { get; set; }\n    public string Department { get; set; }\n    public float Salary { get; }\n\n    public Employee(float salaryFactor)\n    {\n        Salary = DEFAULT_SALARY * salaryFactor;\n    }\n    \n    public Employee()\n    {\n        Salary = DEFAULT_SALARY;\n    }\n}

Take the above class into consideration. It has a field that gets initialized in the constructor based on the value of the parameter. Even though we didn't really need two constructors, since we could've taken advantage of the optional parameters feature, this serves our case better, so here it is.

var employee = new Employee(1.2f) { Name = \"Joe Rico\", Department = \"DevOps\" };\nvar defaultEmployee = new Employee() { Name = \"David Hill\", Department = \"QA\" };

Look how elegant and readable this is! I actually prefer it over having to pass the values to the constructor, as adding and removing fields won't be troublesome. In Java, on the other hand, it is very common to use the builder pattern (sketched below), which, by the way, is not specific to the language. However, the object initializer, in addition to being cleaner than the builder pattern, is natively supported by the language.
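
For contrast, here is a rough sketch of what the builder-pattern approach usually looks like in Java; the Employee class and its fields here are hypothetical and only mirror the C# example above:

public class Employee {

    private final String name;
    private final String department;

    private Employee(Builder builder) {
        this.name = builder.name;
        this.department = builder.department;
    }

    public static class Builder {
        private String name;
        private String department;

        public Builder name(String name) { this.name = name; return this; }
        public Builder department(String department) { this.department = department; return this; }
        public Employee build() { return new Employee(this); }
    }

    public static void main(String[] args) {
        // the Java counterpart of the object-initializer call above
        Employee employee = new Employee.Builder().name("Joe Rico").department("DevOps").build();
        System.out.println(employee.name + " - " + employee.department);
    }
}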

2. Properties (C# 1.0)

This must be the coolest feature for the language to ever have. We have seen properties in our previous feature's code; however, I called them \"fields\" to avoid confusion. Before explaining what a property is, let's see a simple example of it:

public class TimePeriod\n{\n    public double Hours { get; set; }\n}

In the above code, the class TimePeriod has a property named Hours. A property is basically a wrapper for a field with its getter and setter. The awesome part is that the behavior of these functions is set by default since, in most cases, their logic is the same. However, you can override this behavior as follows:

public class TimePeriod\n{\n    private double seconds;\n\n    public double Hours\n    {\n        get { return seconds / 3600; }\n        set {\n            if (value < 0 || value > 24)\n                throw new ArgumentOutOfRangeException(\n                    $\"{nameof(value)} must be between 0 and 24.\");\n\n            seconds = value * 3600;\n        }\n    }\n}

If you thought this feature can't get cooler, you couldn't be more wrong, because you can also:

public class TimePeriod\n{\n    public double Hours { get; set; } = 10;\n}

Provide a property with a default value.

public class TimePeriod\n{\n    public double Hours { get; private set; }\n}

Add an access modifier to either accessor (getter or setter) and define its scope of accessibility.

public class TimePeriod\n{\n    public double Hours { get; }\n}

Make a property read-only, where you can only give it a value in the constructor.

public class TimePeriod\n{\n    public double Hours { get; } = 2;\n}

Or you can also make it read-only with a default value!

Of course, Java has some \"equivalents\" to C# properties, like Lombok's @Getter, @Setter, and @Data annotations (sketched below); however, these are not natively supported by the language, and they add an ugly wrapper to your class/fields.
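
For reference, a minimal sketch of that Lombok-based Java counterpart (assuming Lombok is on the classpath):

import lombok.Getter;
import lombok.Setter;

public class TimePeriod {

    // Lombok generates getHours()/setHours() at compile time;
    // unlike C#, there is no language-level property syntax
    @Getter
    @Setter
    private double hours;
}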

3. Structs (C# 1.0)

Have you ever asked yourself why you need to use the class Integer (with the capital I) in Java when creating a list of integers, and not simply List<int>? The answer is that primitive data types in Java cannot be used as generic type arguments. To solve this issue, C# went with structs: value types that can have methods. The difference between value types and reference types is that value types hold their data directly and are copied on assignment, rather than being references to objects on the heap. That's why in C# we can do the following (a Java counterpart follows for contrast):

var numbers = new List<int>();\nvar one = int.Parse(\"1\");
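
For comparison, the Java side of that snippet has to go through the wrapper type; a small sketch:

import java.util.ArrayList;
import java.util.List;

public class BoxingExample {
    public static void main(String[] args) {
        // List<int> does not compile in Java; the wrapper class is required
        List<Integer> numbers = new ArrayList<>();
        numbers.add(1); // the int literal is autoboxed into an Integer

        int one = Integer.parseInt("1");
        System.out.println(numbers.get(0) + one); // prints 2
    }
}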

4. Operators! (C# 1.0)

Overloading operators is one of the basic features any language should have. However, I still don't understand why Java doesn't support it! To overload an operator in C#, you do it like this:

public class TimePeriod\n{\n    public double Hours { get; set; }\n\n    public static TimePeriod operator +(TimePeriod operandOne, TimePeriod operandTwo)\n    {\n        return new TimePeriod() { Hours = operandOne.Hours + operandTwo.Hours };\n    }\n}\n\nvar timePeriod1 = new TimePeriod() { Hours = 2 };\nvar timePeriod2 = new TimePeriod() { Hours = 6 };\nvar timePeriod3 = timePeriod1 + timePeriod2;
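
In Java, lacking operator overloading, the closest equivalent is a plain method; here is a rough sketch with a hypothetical TimePeriod class mirroring the C# one:

public class TimePeriod {

    private final double hours;

    public TimePeriod(double hours) {
        this.hours = hours;
    }

    // no "operator +" in Java; an explicit method has to do the job
    public TimePeriod plus(TimePeriod other) {
        return new TimePeriod(this.hours + other.hours);
    }

    public static void main(String[] args) {
        TimePeriod total = new TimePeriod(2).plus(new TimePeriod(6));
        System.out.println(total.hours); // 8.0
    }
}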

5. Nullable Value Types (C# 2.0)

If you're looking for the index of an object in a list, what should you return if the object was not found? -1? throw an exception? well, both options are commonly used, but doesn't it make more sense to return null?! C# introduced the feature where value types (int, bool, etc.) can have a null value! For a value type to be nullable, it needs to have a question mark after its type:

int? index = null;

6. Nullable Reference Types! (C# 8.0)

Don't you hate it when you're midway through developing your application and you get a null pointer exception out of nowhere? It would be a lot easier if there were type safety around nullability. Java's solution to this problem is using Optionals (a sketch follows the snippet below). As much as this does the job, it really gets messy with Optionals all over the place. To tackle this, C# adopted the nullable reference types feature, like some other languages such as Kotlin. It also uses the question mark to denote nullability. However, breaking this rule is not yet considered an error; only warnings are emitted, as there's a need to keep the language backward compatible.

string? nullableString = null;\nstring nonNullableString = null; // works for now but creates a warning

7. Null Propagators (C# 6.0)

Another cool feature that avoids null pointer exceptions without creating a mess in the code is the null propagation operator. Of course, this feature is not C#-specific, but having it adopted by the language is pretty awesome!

int? stringLength = nullableString?.Length; // gets assigned to null if nullableString is null
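
For comparison, a rough Java sketch of the same null-safe access using the Optional workaround mentioned in the previous section:

import java.util.Optional;\n\npublic class NullPropagationExample {\n    public static void main(String[] args) {\n        String nullableString = null;\n\n        // Optional.map only applies the mapping when a value is present,\n        // so stringLength ends up null when nullableString is null.\n        Integer stringLength = Optional.ofNullable(nullableString)\n                .map(String::length)\n                .orElse(null);\n\n        System.out.println(stringLength); // null\n    }\n}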

8. Extension Methods (C# 3.0)

This is by far the most underrated feature in C#. It gives you the ability to extend a class from a different codebase or the same one. The best part is that the class doesn't have to be owned by you! For example, imagine I want to extend the int struct and add to it a method that checks if the integer is even. To do so using this feature, you need to follow the example:

// the class has to be static. The name is irrelevant\npublic static class IntegerUtilities {\n\n    // the method has to be static, and the keyword `this`\n    // should precede the parameter whose type we are extending\n    public static bool IsEven(this int number)\n    {\n        return number % 2 == 0;\n    }\n}\n\nvar counter = 10;\nConsole.WriteLine(counter.IsEven()); // true

9. Named and Optional Arguments (C# 4.0)

This is another feature that I can't understand why Java hasn't adopted yet. In C#, it looks like this:

public void ExampleMethod(int required, string optionalString = \"default value\", int optionalInt = 2)\n{\n    \n}\n\nExampleMethod(4, optionalInt: 0);
","createdDate":"2022-05-13T23:30:56.060","lastModifiedDate":"2022-07-06T18:48:40.876","keywords":["Java","C#"]},{"id":"WnMZd0XdePk","title":"JWT Authentication with .NET","description":"JWT or JSON Web Token authentication is a very trending authentication mechanism to use with APIs. However, implementing a simple JWT scheme with minimal capabilities can be triggering as you may not want to go with third-party solutions. In this article, we will go over the simplest implementation of JWT authentication in .NET","estimatedReadingTime":"3 min read","categoryId":"software-development","thumbnailImageUrl":"https://www.bariskisir.com/static/img/posts/jwt-logo.jpg","content":"

Motivation

JSON Web Token authentication is not easy to implement, especially with all the features that come with any identity scheme, such as roles, scopes, permissions, etc. For that, there are several third-party libraries that help provide all the needed services, one of which is IdentityServer, provided by Duende Software. However, going with such an approach might be overkill for your app, especially if you are not planning to benefit from all the features they offer. So, if all you need is a simple authentication scheme over the network, this article is for you.

Prerequisite

For this tutorial, we will need one or more working .NET projects, as JWT authentication can span multiple projects. I will be using .NET 6, but feel free to use any other version as long as you are able to account for the version differences.

I will assume that you have multiple projects you want to apply authentication to. If not, the documentation will still work, but it will be simpler for you to implement.
 

I will be using MongoDB for this guide, as I find it less commonly covered than SQL databases, but feel free to use another DB vendor, as this is just a detail.

Server Project

Needed Packages

First, you need to add the required dependencies to your project. For the server, there is only one package to add, and it is specific to the DB vendor you're using. Since I'm using MongoDB, the package to be added is AspNetCore.Identity.Mongo. If you're using MSSQL, then you'll need the packages Microsoft.AspNetCore.Identity.EntityFrameworkCore and Microsoft.AspNetCore.ApiAuthorization.IdentityServer, which are provided by Microsoft.
 

Setting Up Identity

In your Program.cs file (or, if you're using a .NET version prior to 6, your Startup.cs class), add the following code:

builder.Services.AddIdentityMongoDbProvider<User>(_ => { }, mongo =>\n    {\n        mongo.ConnectionString = \"connectionString\";\n    })\n    .AddDefaultTokenProviders();\n\nbuilder.Services.AddAuthentication(options =>\n    {\n        options.DefaultAuthenticateScheme = JwtBearerDefaults.AuthenticationScheme;\n        options.DefaultChallengeScheme = JwtBearerDefaults.AuthenticationScheme;\n        options.DefaultScheme = JwtBearerDefaults.AuthenticationScheme;\n    })\n    .AddJwtBearer(options =>\n    {\n        options.SaveToken = true;\n        options.RequireHttpsMetadata = true;\n        options.TokenValidationParameters = new TokenValidationParameters\n        {\n            ValidateAudience = false,\n            ValidIssuer = \"https://localhost\",\n            ValidateIssuerSigningKey = true,\n            IssuerSigningKey = new SymmetricSecurityKey(Encoding.UTF8.GetBytes(\"secret\"))\n        };\n    });

Note: don't forget to change the issuer and the secret as you need. You can also apply additional configuration to the options, so feel free to check them out.

Also, you need to add the following lines before app.MapControllers()

app.UseAuthentication();\napp.UseAuthorization();

If your server project is also your client, then you are set to go. However, if you will be consuming the access token in another project or more, then you need to apply the following to each one of them:

Client Project

Needed Package

You will need to add the package Microsoft.AspNetCore.Authentication.JwtBearer to your project dependencies.
 

Setting Up Identity

As we did in the server, we will be adding the following code to Program.cs

builder.Services.AddAuthentication(options =>\n    {\n        options.DefaultAuthenticateScheme = JwtBearerDefaults.AuthenticationScheme;\n        options.DefaultChallengeScheme = JwtBearerDefaults.AuthenticationScheme;\n        options.DefaultScheme = JwtBearerDefaults.AuthenticationScheme;\n    })\n    .AddJwtBearer(options =>\n    {\n        options.SaveToken = true;\n        options.RequireHttpsMetadata = true;\n        options.TokenValidationParameters = new TokenValidationParameters\n        {\n            ValidateAudience = false,\n            ValidIssuer = \"https://localhost\",\n            ValidateIssuerSigningKey = true,\n            IssuerSigningKey = new SymmetricSecurityKey(Encoding.UTF8.GetBytes(\"secret\"))\n        };\n    });\n\napp.UseAuthentication();\napp.UseAuthorization();

Protecting Controllers

The last step would be to specify which controllers or endpoints you want to protect with the access token. To do so, add the attribute [Authorize] over the whole class if you want to protect all of your controller's endpoints, or above each one of the needed endpoints.

[Authorize]\n[ApiController]\n[Route(\"[controller]\")]\npublic class ArticleController : ControllerBase\n{\n    ...\n}
","createdDate":"2022-05-08T12:59:13.029","lastModifiedDate":"2022-05-08T12:59:13.029","keywords":[".NET","JWT Authentication","Identity"]},{"id":"l49Wd58mQkX","title":"NGINX as a Reverse Proxy With Docker","description":"If you're following the microservices architecture, or at least you have several endpoints for the backend to maintain, you may want to use reverse proxy, and NGINX got you. In the following tutorial we will go over how to use it alongside Docker and get it up and running.","estimatedReadingTime":"4 min read","categoryId":"devops","thumbnailImageUrl":"https://miro.medium.com/max/1400/1*FcigeCUocGksT_eaQ4JH9w.png","content":"

Motivation

You may want to use a reverse proxy/ API gateway in your project for the following reasons:

  • you don't want to expose your endpoints to the client. Instead, it is preferable to have a single entry point address that hides all the internal endpoints behind it.
  • changing your endpoints or moving them around won't be an issue for the client anymore, as it will all be the proxy's job to maintain those endpoints.

NGINX Docker Container

First, you need to create a container to play the role of the proxy. We do that by running NGINX's alpine image, but since we want to configure it, we will build our own custom image on top of theirs and pass in our configuration.

FROM nginx:alpine\n\nCOPY ./nginx.conf /etc/nginx/nginx.conf

NGINX Configuration

To configure NGINX, we need to create a configuration file named `nginx.conf`. Its content will be:

worker_processes auto;\nevents { }\n\nhttp {\n    server {\n        listen  80;\n        server_name localhost;\n        return 301 https://$host$request_uri;\n    }\n    \n    server {\n        listen 80;\n        server_name _;\n        \n        location / {\n            proxy_pass http://ui;\n        }\n\n        location /service1/ {\n            proxy_pass https://service1/;\n        }\n        \n        location /service2/path/ {\n            proxy_pass https://service2/;\n        }   \n        \n    }\n}\n

In the above configuration, we are:

  • listening to port 80
  • defining the default path (with a slash '/')
  • defining all the paths that we want to create proxies for
  • passing the call to the defined proxies

Defining a proxy

To define a proxy:

  1. specify the path that the gateway will receive and pass it to the hidden service
  2. pass it to the address of the service. If you are using Docker Compose, you can just specify the hostname of the service used in docker-compose.yml

The syntax goes as follows:

location /path_gateway_will_receive/ {\n    proxy_pass https://name-of-service/;\n}

Note: be very careful with the slashes. Make sure to use them like in the example above.

Using SSL/TLS Certificate

If you want your backend to support HTTPS, it is enough to provide an SSL certificate for the API gateway. Other than that, all the calls between the gateway and the services stay internal to the Docker network.

Generating the certificate

To generate a certificate, run the following command in your UNIX shell:
 

docker run --rm -p 80:80 -p 443:443 \\\n    -v /root/nginx/letsencrypt:/etc/letsencrypt \\\n    certbot/certbot certonly -d {your_domain} \\\n    --standalone -m {your_email_address} --agree-tos

The above command runs a container of the certbot Docker image, which generates the certificate. Make sure to change the values of the domain and the email to match yours.

After you run the command, you will get a folder /root/nginx/letsencrypt/live/{your_domain}/ which contains files:

  • cert.pem
  • chain.pem
  • fullchain.pem
  • privkey.pem

These files, together, represent your certificate.

We also need to create a Diffie-Hellman parameter, which enhances the security of our setup. To generate it, run the following command:

openssl dhparam -out /root/nginx/dhparam.pem 4096

Then you will need to re-configure NGINX to:

  • listen on port 443 alongside 80, since 443 is the default port for HTTPS calls
  • add the paths for our generated files 

To do so, edit nginx.conf as follows:

worker_processes auto;\nevents { }\n\nhttp {\n    server {\n        listen  80;\n        server_name localhost;\n        return 301 https://$host$request_uri;\n    }\n    \n    server {\n        listen 80;\n        listen 443 ssl;\n        server_name _;\n\n        ssl_certificate /etc/letsencrypt/live/mohammed.ezzedine.me/fullchain.pem;\n        ssl_certificate_key /etc/letsencrypt/live/mohammed.ezzedine.me/privkey.pem;\n        ssl_dhparam /etc/ssl/certs/dhparam.pem;\n        add_header Strict-Transport-Security \"max-age=63072000; includeSubdomains\";\n        ssl_trusted_certificate /etc/letsencrypt/live/mohammed.ezzedine.me/fullchain.pem;\n        \n        location / {\n            proxy_pass http://ui;\n        }\n\n        location /service1/ {\n            proxy_pass https://service1/;\n        }\n        \n        location /service2/path/ {\n            proxy_pass https://service2/;\n        }     \n        \n    }\n}\n

Docker Compose

Docker Compose is a recommended tool to orchestrate your containers. The following docker-compose.yml file does the job for our purpose

version: '3.9'\n\nservices:\n  service1:\n    build: path/to/service1\n    hostname: service1\n    networks:\n      - gateway-internal\n        \n  service2:\n    build: path/to/service2\n    hostname: service2\n    networks:\n      - gateway-internal\n    \n  ui:\n    build: path/to/ui\n    hostname: ui\n    networks:\n      - client-gateway\n      \n  proxy:\n    build: path/to/proxy\n    hostname: proxy\n    networks:\n      - client-gateway\n      - gateway-internal\n    ports:\n      - \"80:80\"\n      - \"443:443\"\n    volumes:\n      - ./cert/nginx/letsencrypt:/etc/letsencrypt\n      - ./cert/nginx/dhparam.pem:/etc/ssl/certs/dhparam.pem\n        \nnetworks:\n  client-gateway:\n    name: client-gateway-network\n  gateway-internal:\n    name: gateway-internal-network
","createdDate":"2022-05-07T22:50:44.481","lastModifiedDate":"2022-05-07T22:50:44.481","keywords":["NGINX","Docker","Container","HTTPS","SSL","API Gateway","Reverse-Proxy"]},{"id":"e67QgWvd1DR","title":"Node.js in a Docker Container","description":"With the trend of containerizing software applications, we will go over how to run a node project in a Docker container.","estimatedReadingTime":"3 min read","categoryId":"devops","thumbnailImageUrl":"https://miro.medium.com/max/646/1*epfvG4ZmlzhhNCBPvFgC9A.png","content":"

Motivation

Even though running a Node.js project in a Docker container might sound trivial, making it actually work is not. To start, we assume that you already have a Node.js project ready for this tutorial.

Containerizing Node.js Project

The first step in containerizing any project is adding a file to build its image. When using Docker, this file is the Dockerfile. So, add the following `Dockerfile` (without any extension) to your project.

FROM node:16.14.0-alpine AS build\n\nRUN mkdir -p /app\nWORKDIR /app\n\nCOPY package*.json /app/\nRUN npm install\n\nCOPY . /app/\nRUN npm run build\n\nFROM nginx:alpine\n\nCOPY ./default.conf /etc/nginx/conf.d/default.conf\nCOPY --from=build /app/dist/{project_name} /usr/share/nginx/html\n\nEXPOSE 80

As you may have noticed, this file follows the Docker builder pattern, which helps reduce the size of the final image by limiting its content to only the needed files.

In the first stage of the build (lines 1 to 10), we're pulling node's base image and building our project. I chose the alpine version of node 16.14.0, which is a lighter edition of it. The output of this stage is the group of files that will constitute our final image's content, stored under the directory /app/dist/{project_name} of this stage's file system (not visible on your local machine).

After building our project, we need a web server to serve it. For this tutorial, I chose NGINX.

In the second stage (lines 12 to 17), we are:

  1. pulling NGINX's alpine image
  2. copying NGINX's configuration file `default.conf` (which we will talk about in a bit) to its required directory in our image
  3. copying the output of the build done in the build stage to NGINX's web root directory.
  4. exposing the port 80

Configuring NGINX

NGINX is more than just a web server; it can be used as a reverse proxy, a load balancer, etc. Whatever you're using NGINX for, you will probably need to configure some of its behavior.

In our example, we are using it as a web server. Without line 14 of the Dockerfile, our web application will not behave well on refresh, as it will redirect us to a 404 error. To solve this, we need to create a file named `default.conf` and copy it into the image, as we are doing in line 14 of the Dockerfile. The content of default.conf is below:

server {\n  listen 80;\n  root /usr/share/nginx/html;\n\n  location / {\n    try_files $uri $uri/ /index.html =404;\n  }\n}\n

In the above configuration, we are:

  1. listening on port 80
  2. specifying the webroot path, where our build content lives in
  3. adding a rule for all the paths in order to avoid immediately being redirected to the 404 error page.

Running the Container

To run the container, you can run the following commands:

docker build -t image_name .\ndocker run -dp 8080:80 image_name
","createdDate":"2022-05-07T15:58:39.874","lastModifiedDate":"2022-05-07T22:58:24.054","keywords":["Docker","Node.js","NGINX","Container"]},{"id":"noG1gljma2p","title":"Deploying Docker Containers With DigitalOcean","description":"Running different technologies has been made easier by using Docker, where you are not coupled to what the hosting server offer's you anymore, all you need is a place that runs Docker containers. Digital is a very good place to do so, with a very reasonable pricing.","estimatedReadingTime":"3 min read","categoryId":"devops","thumbnailImageUrl":"https://collabnix.com/wp-content/uploads/2020/01/image-13.png","content":"

Prerequisite

In this article, we assume that you already have one or more Docker images that run locally on your machine. We will be running them on a public server that is accessible from the browser.

DigitalOcean

DigitalOcean is a very straightforward and easy-to-use hosting provider, with very reasonable prices. What we will be doing is basically renting a virtual machine on their servers, where we will have root access to its shell. Then we will open an SSH connection to it, copy our files over, and run our container from the command line.

Setting up your account

  1. First, you need to register at their platform. You can do it from here.
  2. Then, you will need to create a new project from the left sidebar.
  3. After you have your project set up, you then need to create a droplet. It is DigitalOcean's notion of a virtual server. For the choice of OS, I went with Ubuntu, with the basic plan ($5/month as of writing this article).
    1. During the creation of the droplet, you'll be asked to generate an SSH key. Do so by following the steps shown in the popup window, as this will be your way in from your local machine to the remote one.

After setting up your droplet, you'll be able to see it as follows:

\"\"
 

The IP address that you see is the public address that you can use to access the server from the browser. For now, it is still not bound to any entry point.

Adding your domain

If you have an available domain name that you would like to bind to the above IP address, you can click on the ⋯ on the right of the droplet > Add a domain.

You need to add the domain name in the portal, then go to your domain name provider (e.g. Namecheap) and add the following nameservers in the domain's DNS configurations:

  • ns1.digitalocean.com.
  • ns2.digitalocean.com.
  • ns3.digitalocean.com.

This configuration redirects the domain to DigitalOcean's DNS servers.

Connecting to the shell

Now that you have your droplet set and ready, you need to connect to its shell to do all the magic. You can do so by either opening an SSH connection from your local machine (the one you used to generate the SSH key earlier) to the remote machine, or by directly opening a console from the browser by clicking on the ⋯ on the right of the droplet > Access console.

Open SSH connection

However, if you want to do it in a more professional way:

  1. Open a UNIX shell on your local machine
  2. Enter the following command:
ssh root@{droplet_ip_address}

You will be asked to enter the passphrase you used when generating the SSH key. This proves that you are not trying to impersonate someone else.

Setting up Docker

Now that you have access to the shell, remember that the droplet is a computer of its own, so you must treat it as one. You may want to install some dependencies related to Docker and Docker Compose based on your needs.

Then copy your docker-compose.yml to the machine and run the command:

docker-compose up -d

or you may want to run the docker container directly from the Docker CLI.

Container Registry

You may need a place to store your Docker images and pull them from. Consider Docker Hub, which hosts public images for free, or a self-hosted Docker Registry.

","createdDate":"2022-05-06T18:24:30.724","lastModifiedDate":"2022-05-06T18:24:30.724","keywords":["Docker","Container","Hosting","DigitalOcean"]},{"id":"R6ZBgylde28","title":".NET, Docker & SSL","description":"In this article, we will go over how to run a .NET docker container with a free self-signed generated SSL certificate over HTTPS","estimatedReadingTime":"3 min read","categoryId":"devops","thumbnailImageUrl":"https://github.com/mohammed-ezzedine/public-assets/blob/main/articles/dotnet%20docker%20ssl.jpg?raw=true","content":"

Prerequisite

This tutorial assumes that:

  • you already have a .NET project under development. It is preferably .NET 6, but feel free to choose any other version as long as you make sure to adjust the versions in the Dockerfile
  • you have access to a UNIX machine (WSL also works)

Containerizing .NET Project

The first thing we need to do is configure our project to run in a container. We will be using docker for this.

To do so, we need to add a file named \"Dockerfile\" (with no extension) at the level of the project:

FROM mcr.microsoft.com/dotnet/aspnet:6.0 AS base\nWORKDIR /app\nEXPOSE 80\nEXPOSE 443\n\nFROM mcr.microsoft.com/dotnet/sdk:6.0 AS build\nWORKDIR /src\nCOPY [\"Solution/Project.csproj\", \"Solution/\"]\nRUN dotnet restore \"Solution/Project.csproj\"\nCOPY . .\nWORKDIR \"/src/Solution\"\nRUN dotnet build \"Project.csproj\" -c Release -o /app/build\n\nFROM build AS publish\nRUN dotnet publish \"Project.csproj\" -c Release -o /app/publish\n\nFROM base AS final\nWORKDIR /app\nCOPY --from=publish /app/publish .\nENTRYPOINT [\"dotnet\", \"Project.dll\"]

One thing to notice in the first part of the above file (lines 1 to 4) is that we are exposing two ports: 80 (for HTTP requests) and 443 (for HTTPS). Also, make sure to change the base image version if you are not using .NET 6.

In the second part of it (lines 6 to 12), we are building the solution. Notice here that we're using the Docker Builder Pattern. This helps reduce the final image's size, so we only keep what is needed.

In the third part, we are running `dotnet publish`, which prepares our project for deployment.

In the last part, we are copying the result of the previous step, which will constitute our final image's content.

Note: make sure to rename \"Solution\" and \"Project\" to your solution and project's names.

We can now build and run our image; however, it will only run over HTTP, because we haven't created an SSL certificate for it yet.

Generating SSL/TLS certificate using Let's Encrypt

Let's Encrypt is a free service for generating SSL certificates. Note here that its certificates are valid for 90 days only, so you will need to renew them roughly every 3 months.

To generate a certificate, run the following command in your UNIX shell:

docker run --rm -p 80:80 -p 443:443 \\\n    -v /root/nginx/letsencrypt:/etc/letsencrypt \\\n    certbot/certbot certonly -d {your_domain} \\\n    --standalone -m {your_email_address} --agree-tos

The above command runs a container of the certbot Docker image, which generates the certificate. Make sure to change the values of the domain and the email to match yours.

After you run the command, you will get a folder /root/nginx/letsencrypt/live/{your_domain}/ which contains files:

  • cert.pem
  • chain.pem
  • fullchain.pem
  • privkey.pem

These files, together, represent your certificate.

Docker Compose

We will be using docker-compose to orchestrate the container's lifetime. To do so, add the following docker-compose.yml file

version: '3.9'\n\nservices:\n  dotnet-app:\n    build: path/to/dotnet\n    hostname: dotnet\n    container_name: dotnet\n    ports:\n      - \"8080:80\"\n      - \"8081:443\"\n    volumes:\n      - ./cert/nginx/letsencrypt/live/{your_domain}:/https/\n    environment:\n      - ASPNETCORE_URLS=https://+:443;http://+:80\n      - ASPNETCORE_Kestrel__Certificates__Default__Path=/https/fullchain.pem\n      - ASPNETCORE_Kestrel__Certificates__Default__KeyPath=/https/privkey.pem

Running Docker Compose

Now, to run your project:

docker-compose up -d

Your project will then run on http://localhost:8080 and https://localhost:8081.

","createdDate":"2022-05-06T10:49:37.558","lastModifiedDate":"2022-05-07T15:25:50.029","keywords":["Docker","SSL","HTTPS",".NET","Let's Encrypt"]},{"id":"BlvadQNgEWN","title":"Things to Learn Early in Your Software Career","description":"As a software engineer, you don't get to learn much of useful stuff in college. So if you are a fresh graduate or you're still in your early stage of your career, you might be interested in learning about revolutionizing topics in this field. In this article we will talk about few of these hot topics.","estimatedReadingTime":"6 min read","categoryId":"software-development","thumbnailImageUrl":"https://i.imgur.com/T0l215A.jpg?fb","content":"

Test-Driven Development (TDD)

Before defining what TDD is, we should emphasize the importance of writing tests for your software. Even though it may sound very intuitive that tests are essential for any program, developers tend to avoid writing them, claiming that they trust their work and that testing it manually is enough to ensure that it is working \"just fine\".

Another problem that arises when writing tests is the quality of these tests. If your test suite (the name commonly given to your set of tests) doesn't make you feel comfortable changing anything in the code with the certainty that you haven't broken any of it, then these tests are not enough.

This is where TDD comes in handy. Test-Driven Development is the practice of writing the tests first, i.e., before writing the code! As strange as it might sound, TDD is a well-known and widely used practice, and it is effective in reaching a high-quality set of tests.

The idea of writing tests first goes as follows:

  1. you write the minimum amount of test code needed to make the test suite fail (a compilation error is considered a failure here)
  2. you then write the minimum amount of code needed to make the tests pass

You simply keep on repeating steps 1 and 2 until the development is done. Then you'll end up with the following:

  1. code that is fully tested 
  2. a high-quality test suite, since your tests cover all the possible execution scenarios
  3. tests that serve as documentation and an entry point for the reader to understand what your code does

What the last point means is that your tests will eventually cover things as simple as how to create an instance of a class, growing in complexity to reach the most sophisticated operations done by the program.
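
To make the loop concrete, here is a minimal JUnit 5 sketch of a single red/green iteration (the class names are hypothetical):

import static org.junit.jupiter.api.Assertions.assertEquals;\n\nimport org.junit.jupiter.api.Test;\n\n// Step 1: the test is written first; it fails (it does not even compile)\n// until a Calculator class with an add method exists.\nclass CalculatorTest {\n\n    @Test\n    void adding_two_numbers_returns_their_sum() {\n        Calculator calculator = new Calculator();\n        assertEquals(5, calculator.add(2, 3));\n    }\n}\n\n// Step 2: the minimum amount of production code needed to make the test pass.\nclass Calculator {\n    int add(int a, int b) {\n        return a + b;\n    }\n}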

Domain-Driven Design (DDD)

The motivation behind DDD is that the architecture of your project should reflect the domain of the project rather than the tools or frameworks you used to develop it. For instance, let's take the following solution structure:

\"Introduction
 

This photo shouts, \"I am a .NET project. I am a web project. I use the MVC architecture...\" Well, yeah, but what are you about? No idea...

What DDD suggests is the following:

  1. the framework you are using is just a tool, which your project should not be dependent on
  2. the structure of your project should be reflective of what the project does and is about
  3. separation of concerns can be achieved through the Onion Architecture 

The Onion Architecture

It is a layered software architecture that enforces separation of concerns through a clean division of responsibilities. There are different versions of it; however, they don't differ in the core principles behind them. A simple version of the onion architecture is shown in the figure below:

\"GitHub
 

As you can see, the architecture diagram shows a layered structure similar to that of an onion, hence the name: onion architecture. This architecture states the following:

The domain level

  • it is where you define your models and domain-related classes; e.g. if you are working on software for a library, here is where you add the classes that represent books, copies, shelves, etc.
  • it doesn't depend, at compile-time, on any other part of the project.
  • it doesn't contain any details related to other levels, such as databases, presentation infrastructure (web, mobile application, etc.). This ensures ease of modification in the future.

The application level

  • it is where you define the core logic related to the operations to be acted on the domain; i.e. creation of instances, updating objects states, etc.
  • it depends directly on the domain level only. Notice that this layer is agnostic of what type of persistence mechanism is being used.

The infrastructure level

  • this is where you implement your persistence mechanism if needed.
  • it only depends on the application layer
  • it should be isolated enough to make it easy in the future for modification; e.g. if you want to replace a DB vendor with another one.

The presentation level

  • this defines how you will be presenting the data to the user. It can be a web application, desktop application, etc. Notice here how this became a minor detail, as opposed to how it was the core of the project in the structure shown earlier

 

Here is another project structure following the onion architecture:

\"\"
 

As you can see:

  • the domain project contains our core classes
  • the application project contains services that act on the objects' state, and it defines the interfaces the persistence layer is expected to implement; these live in the Persistence folder.
  • the persistence project contains the repositories and the entity representations of the core classes. These entities can be coupled to the DB vendor being used, as it is easy to replace them.
  • the presentation project, in this case, is a web application, which is a minor detail that doesn't need to be under the spotlight.
  • the main project is where we connect it all together. A minimal code sketch of how these layers depend on each other follows below.
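
Here is a minimal Java sketch of these dependencies, reusing the library example from earlier (all names are hypothetical, and each type would live in its own project):

// Domain level: plain classes with no dependency on frameworks or other layers.\npublic class Book {\n    private final String isbn;\n    private final String title;\n\n    public Book(String isbn, String title) {\n        this.isbn = isbn;\n        this.title = title;\n    }\n\n    public String getIsbn() { return isbn; }\n    public String getTitle() { return title; }\n}\n\n// Application level: the core operations, plus the persistence interface\n// that the infrastructure layer is expected to implement.\npublic interface BookRepository {\n    void save(Book book);\n}\n\npublic class BookCatalogService {\n    private final BookRepository repository;\n\n    public BookCatalogService(BookRepository repository) {\n        this.repository = repository;\n    }\n\n    public void registerBook(String isbn, String title) {\n        repository.save(new Book(isbn, title));\n    }\n}\n\n// Infrastructure level: a vendor-specific implementation that is easy to swap out.\npublic class InMemoryBookRepository implements BookRepository {\n    private final java.util.Map<String, Book> store = new java.util.HashMap<>();\n\n    @Override\n    public void save(Book book) {\n        store.put(book.getIsbn(), book);\n    }\n}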

Containerization

Another important tool to become independent from your infrastructure is containerization, but before digging into its concepts, let's go over some scenarios. When working on a new project, you will need to consider shipping it and making it available for production. Developers often choose their tech stack based on what is available in the market for hosting servers and whatsoever, and this is wrong behavior. By doing so, you are coupling yourself to a service offered by a third party! What you should be doing is choosing your tools based on what most fits your solution, then worry about finding a good hosting service that fits what you chose. However, the problem might be that you may not find something in your budget range. Here is where containerization comes into action.

Containerization is the concept of running your project in a container (similar to a virtual machine); i.e. when you need to ship it, you deploy the whole container containing your project and all that it needs to run, such as OS, infrastructure, and other dependencies. This way, you only worry about finding a hosting service that supports deploying containers, and there are many.

Containerization also solves many other issues, such as having the same experience when running the project in development mode or in production, since in all cases, the container is agnostic of the physical machine's infrastructure.

Docker is one of the best active solutions to achieve containerization, so learning it early in your career will help you a lot with your future work.

","createdDate":"2022-04-23T14:23:19.539","lastModifiedDate":"2022-04-24T01:38:17.405","keywords":["Software Engineering","Career","Test-Driven Development","Domain-Driven Design","Docker","Container"]},{"id":"52JDmeXxvj8","title":"Multi-Tenancy in Spring Boot and Consul [Database Per Tenant]","description":"Multi-Tenancy is a great demand when developing SaaS. Where there are different approaches to achieve it, in this article, I will go over the approach of creating a database for each tenant.","estimatedReadingTime":"5 min read","categoryId":"software-development","thumbnailImageUrl":null,"content":"

Motivation

One of the biggest trends in the tech field currently is Software-as-a-Service (SaaS). One of the requirements of such a trend is the support for multiple types of users and the ability to customize the output screen based on the type of the signed-in user. This differs from the issue of authorization since the filtering here is based on the client/company the user belongs to, rather than the role or permissions they have been provided with. The very common way to solve this is multi-tenancy, by which the platform has multiple tenants, each corresponding to a client, and the user will be acting on the tenant that belongs to them.

Multi-Tenancy

As complicated as it may sound, the technicality behind multi-tenancy is as simple as having multiple data sources, each belonging to a tenant/client; then, whenever we need to query a database, we choose the data source belonging to the client that issued the query.

On the ground, there are three ways to implement such behavior:

  1. Shared database, shared schema: where all the data live in the same physical database and share the schema, where we will have a foreign key appended to each data entry indicating the tenant id it belongs to.
    1. Pros:
      1. Easy to build and maintain
      2. Very cost-effective
    2. Cons:
      1. Not secure enough
  2. Shared database, separate schema: the data in this approach will still live in the same physical database, however, each will have its own schema. When we want to query the database, we have to choose which schema to connect to based on the client issuing the query.
    1. Pros:
      1. More security
      2. Easy to build
    2. Cons:
      1. Hard to scale
  3. Database per Tenant: each tenant in this scenario will have its own separate database.
    1. Pros:
      1. Highest level of security
    2. Cons:
      1. Very costly

In this post, we will cover how to implement the third approach in Spring Boot and Consul. (I assume that you already have a project set up with the needed dependencies: Spring Boot and Consul)

Data Configuration

The challenge of implementing multi-tenancy boils down to one simple task: connect to a database at runtime based on the request I get. If we want to do it manually, we would need to create a driver instance each time a request comes in and provide it with the connection string of the database that is relevant. However, in Spring Boot, it can be done in a more isolated way.

TenantContext class

We will store the tenant ID of the active request in a thread-local storage registry. This is safe to implement since the scope of a request (HTTP or gRPC) is bound to the scope of the thread.

@Component\npublic class TenantContext {\n \n    private final String DEFAULT = \"default_tenant\";\n    private final ThreadLocal<String> TENANT_ID = ThreadLocal.withInitial(() -> DEFAULT);\n \n    public void setTenantId(String id) {\n        TENANT_ID.set(id);\n    }\n \n    public String getTenantId() {\n        return TENANT_ID.get();\n    }\n \n    public void clear() {\n        TENANT_ID.set(DEFAULT);\n    }\n}

DataSourceConfiguration class

We will need to add the following class to provide the needed configuration for the JPA data source.

@Configuration\n@AllArgsConstructor\npublic class DataSourceConfiguration {\n \n    private final TenantContext tenantContext;\n    private final TenantRepository tenantRepository;\n    private final ConsulClient consulClient;\n \n    @Bean\n    public DataSource getDataSource() {\n        return new MultiTenantDataSource(tenantContext, tenantRepository, consulClient);\n    }\n}

This configuration provides a bean for the DataSource class, making it possible to control which data source is used at runtime.

MultiTenantDataSource class

The implementation of the MultiTenantDataSource class follows:

public class MultiTenantDataSource extends AbstractRoutingDataSource {\n \n    private final TenantContext tenantContext;\n    private final TenantRepository tenantRepository; // the application's own registry of tenants\n    private final ConsulClient consulClient; // Spring Cloud Consul client used to read the per-tenant configuration\n \n    public MultiTenantDataSource(TenantContext tenantContext, TenantRepository tenantRepository, ConsulClient consulClient) {\n        this.tenantContext = tenantContext;\n        this.tenantRepository = tenantRepository;\n        this.consulClient = consulClient;\n         \n        Map<Object, Object> tenantsDataSources = getTenantsDataSources();\n        applyMigrationsToDataSources(tenantsDataSources);\n        setTargetDataSources(tenantsDataSources);\n        afterPropertiesSet();\n    }\n \n    private void applyMigrationsToDataSources(Map<Object, Object> tenantsDataSources) {\n        tenantsDataSources.values().forEach(v -> {\n            Flyway flyway = Flyway.configure()\n                    .dataSource((DataSource)v)\n                    .target(MigrationVersion.LATEST)\n                    .load();\n \n            flyway.migrate();\n        });\n    }\n     \n    private Map<Object, Object> getTenantsDataSources() {\n        Map<Object, Object> result = new HashMap<>();\n         \n        List<Tenant> tenants = tenantRepository.findAll();\n        for (Tenant tenant : tenants) {\n            String host = getValue(tenant.getId(), \"db_host\");\n            String db = getValue(tenant.getId(), \"db_name\");\n            String port = getValue(tenant.getId(), \"db_port\");\n            String user = getValue(tenant.getId(), \"db_user\");\n            String password = getValue(tenant.getId(), \"db_password\");\n \n            DataSourceBuilder<?> dataSourceBuilder = DataSourceBuilder.create();\n            dataSourceBuilder.url(\"jdbc:postgresql://\" + host + \":\" + port + \"/\" + db);\n            dataSourceBuilder.username(user);\n            dataSourceBuilder.password(password);\n \n            result.put(tenant.getId(), dataSourceBuilder.build());\n        }\n         \n        return result;\n    }\n \n    private String getValue(String id, String parameter) {\n        String encodedValue = consulClient.getKVValue(\"config/\" + id + \"/\" + parameter).getValue().getValue();\n        return new String(Base64.getDecoder().decode(encodedValue));\n    }\n \n    @Override\n    protected Object determineCurrentLookupKey() {\n        return tenantContext.getTenantId();\n    }\n}

In this class:

  1. we load the tenants' database configuration from consul (method getTenantsDataSources)
    • This step assumes that the configurations are present in consul under the directory: config/{tenant-id}/{config-key} where the value of config-key can be one of the following [ db_host , db_port , db_name , db_user , db_password ]
  2. if needed, we apply migrations to the databases using Flyway (method applyMigrationsToDataSources)
  3. we call on setTargetDataSources which tells the framework that these data sources will be exchanged at runtime based on some predefined key
  4. we override determineCurrentLookupKey, which defines what key to consider when deciding which data source to connect to. We are setting it here to be the value of the tenant ID. A sketch of how this tenant ID can be populated for each incoming request follows below.
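
One way to populate the tenant ID is a servlet filter. A minimal sketch, assuming the tenant ID arrives in a hypothetical X-Tenant-Id HTTP header and a Spring Boot 2.x (javax.servlet) setup:

import org.springframework.stereotype.Component;\nimport org.springframework.web.filter.OncePerRequestFilter;\n\nimport javax.servlet.FilterChain;\nimport javax.servlet.ServletException;\nimport javax.servlet.http.HttpServletRequest;\nimport javax.servlet.http.HttpServletResponse;\nimport java.io.IOException;\n\n@Component\npublic class TenantFilter extends OncePerRequestFilter {\n\n    private final TenantContext tenantContext;\n\n    public TenantFilter(TenantContext tenantContext) {\n        this.tenantContext = tenantContext;\n    }\n\n    @Override\n    protected void doFilterInternal(HttpServletRequest request, HttpServletResponse response, FilterChain filterChain) throws ServletException, IOException {\n        try {\n            // Hypothetical header name; use whatever identifies the tenant in your requests.\n            String tenantId = request.getHeader(\"X-Tenant-Id\");\n            if (tenantId != null) {\n                tenantContext.setTenantId(tenantId);\n            }\n            filterChain.doFilter(request, response);\n        } finally {\n            // Reset to the default tenant so a pooled thread cannot leak the previous tenant's ID.\n            tenantContext.clear();\n        }\n    }\n}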
","createdDate":"2022-04-21T11:26:50.488","lastModifiedDate":"2022-04-21T11:26:50.488","keywords":["Multi-Tenancy","Spring Boot","Consul","Java"]},{"id":"jqXMdK4gwlE","title":"gRPC with Spring Boot and Consul","description":"In this article, we will go over integrating gRPC (Google Remote Procedure Call) with Spring Boot and Consul.","estimatedReadingTime":"6 min read","categoryId":"software-development","thumbnailImageUrl":null,"content":"

Motivation

gRPC (Google Remote Procedure Call) is a method of communication that is an alternative to REST, typically used for inter-services communication. The choice of going with gRPC over REST can be supported by the following points:

  1. gRPC gets rid of the overhead REST imposes by having every request describe its own body format: with gRPC, the message format is agreed upon upfront through protobuf files shared by both the client and the server (we will visit this in more detail)
  2. Unlike plain REST over HTTP/1.1, gRPC multiplexes many requests over a single connection. This saves us the network overhead that precedes and follows each call.

Setup

In order to integrate gRPC into your Spring Boot application, the following Maven dependencies should be added to pom.xml:

Note that these dependencies should be present in both the client and the server projects.

<properties>\n    <grpc.version>1.21.0</grpc.version>\n    <protobuf.version>3.19.4</protobuf.version>\n    <protobuf-plugin.version>0.6.1</protobuf-plugin.version>\n    <grpc-starter.version>3.3.0</grpc-starter.version>\n</properties>\n \n \n<dependencies>\n    <dependency>\n        <groupId>com.google.protobuf</groupId>\n        <artifactId>protobuf-java</artifactId>\n        <version>${protobuf.version}</version>\n    </dependency>\n    <dependency>\n        <groupId>io.grpc</groupId>\n        <artifactId>grpc-protobuf</artifactId>\n    </dependency>\n    <dependency>\n        <groupId>io.grpc</groupId>\n        <artifactId>grpc-stub</artifactId>\n    </dependency>\n    <dependency>\n        <groupId>javax.annotation</groupId>\n        <artifactId>javax.annotation-api</artifactId>\n        <version>1.3.2</version>\n    </dependency>\n    <dependency>\n        <groupId>io.github.lognet</groupId>\n        <artifactId>grpc-spring-boot-starter</artifactId>\n        <version>${grpc-starter.version}</version>\n    </dependency>\n</dependencies>\n \n<dependencyManagement>\n    <dependencies>\n        <dependency>\n            <groupId>io.grpc</groupId>\n            <artifactId>grpc-bom</artifactId>\n            <version>${grpc.version}</version>\n            <type>pom</type>\n            <scope>import</scope>\n        </dependency>\n    </dependencies>\n</dependencyManagement>\n \n<build>\n    <extensions>\n        <extension>\n            <groupId>kr.motd.maven</groupId>\n            <artifactId>os-maven-plugin</artifactId>\n            <version>1.6.2</version>\n        </extension>\n    </extensions>\n    <plugins>\n        <plugin>\n            <groupId>org.xolstice.maven.plugins</groupId>\n            <artifactId>protobuf-maven-plugin</artifactId>\n            <version>${protobuf-plugin.version}</version>\n            <configuration>\n                <protocArtifact>com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier}</protocArtifact>\n                <pluginId>grpc-java</pluginId>\n                <pluginArtifact>io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier}</pluginArtifact>\n            </configuration>\n            <executions>\n                <execution>\n                    <goals>\n                        <goal>compile</goal>\n                        <goal>compile-custom</goal>\n                    </goals>\n                </execution>\n            </executions>\n        </plugin>\n    </plugins>\n</build>

Note: gRPC is not yet officially supported by Spring Boot and Consul, so grpc-spring-boot-starter is a well-maintained third-party library to use. The dependency is already included in the previous code block.


 

Server Setup

Add the following configuration to your application.yml to specify the port on which the gRPC services will listen and to register them with Consul:

spring:\n  application:\n    name: ci-service\n \n  cloud:\n    consul:\n      host: ${CONSUL_HOST:localhost}\n      port: 8500\n      discovery:\n        prefer-ip-address: true\n        instanceId: ${spring.application.name}:${spring.application.instance_id:${random.value}}\n \nserver:\n  port: ${PORT:8080}\n \ngrpc:\n  port: ${GRPC_PORT:7080}

gRPC cannot listen on the same port as the REST controllers. Thus, by default, the REST controllers will be assigned to port ${server.port}, while the gRPC entries will be mapped to port ${grpc.port}.

Protocol Buffers

As previously mentioned, gRPC depends on Protocol Buffers (.proto) files as an interface describing the exposed services and their methods. These files should live inside the directory src/main/proto/

src \n└───main\n    └─── java\n    └─── proto\n    |    └─── file1.proto\n    |    └─── file2.proto\n    └─── resources

Proto Syntax

First, we need to set the version of the proto file. This should be the first line of the file:

syntax = \"proto3\";

Second, we need to add some Java options. These options are not required; however, they tune the code generation process to fit the language's conventions.

option java_multiple_files = true;\noption java_package = \"com.greetservice.grpc\";
  • java_multiple_files: when this option is set to true, each class of the generated code will physically live in a separate file.
  • java_package: as the name may imply, this option sets the package name where the generated code will live in.

Third, we need to add the definition of the service we want to expose. You can think of this service as an entry point to our application. It is similar to the concept of a REST controller; however, this one accepts RPC calls.

service GreeterService {\n  rpc greetPerson(HelloNameRequest) returns (GreetingResponse);\n  rpc greetPeople(stream HelloNameRequest) returns (stream GreetingResponse);  \n  rpc sayHello(EmptyRequest) returns (GreetingResponse);\n  rpc sayHelloMultipleTimes(EmptyRequest) returns (stream GreetingResponse);\n}

As you can see, this service definition contains four method declarations. Each method declaration is formatted as follows:

rpc {name_of_method}({argument_type}) returns ({return_type})

In case a method takes a stream of values (e.g., a list) as an argument, the keyword stream precedes the parameter type. If it returns a stream of values, the keyword stream precedes the return type.

Following that, you add the definition of the types you already used:

message HelloNameRequest {\n  string name = 1;\n}\n \nmessage EmptyRequest { }\n \nmessage GreetingResponse {\n  string greeting = 1;\n}

Code Generation

Once your proto files are ready, make sure you added the needed dependencies to your pom.xml as mentioned in section 2, then build the project using: mvn clean install

Once the build finishes successfully, the service and types defined in the proto files will be generated for us under the directory: target/generated-sources/protobuf/

Note: you may want to reload the maven project for the changes to be effective.

gRPC Server Implementation

Now, we need to implement the logic of the GreeterService in the server project. To do that, create your own class in your server project that extends the automatically generated class GreeterServiceGrpc.GreeterServiceImplBase.

@GRpcService\n@AllArgsConstructor\npublic class GreeterGrpcService extends GreeterServiceGrpc.GreeterServiceImplBase {\n \n    @Override\n    public void greetPerson(HelloNameRequest request, StreamObserver<GreetingResponse> responseObserver) {\n        try {\n            String greeting = \"Hello, \" + request.getName() + \"!\";\n            GreetingResponse response = GreetingResponse.newBuilder().setGreeting(greeting).build();\n            responseObserver.onNext(response);\n               responseObserver.onCompleted();\n         } catch (Exception e) {\n            responseObserver.onError(e);\n        }\n    }\n \n    ...\n}

The @GRpcService Annotation

Typically, to register a gRPC service in Java, you have to do something like this:

public class GrpcServer {\n    public static void main(String[] args) throws IOException, InterruptedException {\n        Server server = ServerBuilder\n          .forPort(8080)\n          .addService(new GreeterGrpcService()).build();\n \n        server.start();\n        server.awaitTermination();\n    }\n}

However, since we are using the grpc-spring-boot-starter library we recommended earlier, adding the annotation @GRpcService to your service class will automatically register it at runtime on a server listening on the grpc.port defined in application.yml

gRPC Client Implementation

After copying the same proto files into your client project under the directory src/main/proto and successfully building the project through mvn clean install, you can create your client class as follows:

@Service\npublic class GreeterGrpcClient {\n \n    private final GreeterServiceGrpc.GreeterServiceBlockingStub greeterServiceBlockingStub;\n \n    public GreeterGrpcClient() {\n        ManagedChannel channel = ManagedChannelBuilder.forAddress(\"localhost\", 7080).usePlaintext().build();\n        greeterServiceBlockingStub = GreeterServiceGrpc.newBlockingStub(channel);\n    }\n \n    public String getGreeting(String name) {\n        GreetingResponse response = greeterServiceBlockingStub.greetPerson(HelloNameRequest.newBuilder().setName(name).build());\n \n        return response.getGreeting();\n    }\n \n    ...\n}

gRPC Client With Consul

If you are using a service registry tool, such as Consul, you may use it to get the address of the server as follows:

Note that to get the gRPC address of a service, add the prefix grpc- to its instance name.

@Service\npublic class GreeterGrpcClient {\n \n    private final GreeterServiceGrpc.GreeterServiceBlockingStub greeterServiceBlockingStub;\n \n    public GreeterGrpcClient(DiscoveryClient discoveryClient) {\n        ServiceInstance serviceInstance = discoveryClient.getInstances(\"grpc-greeter-service\")\n                .stream().findFirst().orElseThrow(() -> new RuntimeException(\"gRPC service is unreachable.\"));\n  \n        ManagedChannel channel = ManagedChannelBuilder.forAddress(serviceInstance.getHost(), serviceInstance.getPort()).usePlaintext().build();\n        greeterServiceBlockingStub = GreeterServiceGrpc.newBlockingStub(channel);\n    }\n \n    public String getGreeting(String name) {\n        GreetingResponse response = greeterServiceBlockingStub.greetPerson(HelloNameRequest.newBuilder().setName(name).build());\n \n        return response.getGreeting();\n    }\n \n    ...\n}

gRPC with Postman!

Recently, Postman added support for gRPC requests, starting from v9.7.1. Check their blog post \"Postman Now Supports gRPC\" for more information on this matter.

","createdDate":"2022-04-19T17:29:04.371","lastModifiedDate":"2022-04-21T11:27:36.356","keywords":["gRPC","Spring Boot","Consul"]},{"id":"av7lxrNxN49","title":"The beautiful mind of a machine","description":"People often confuse artificial intelligence with human intelligence, claiming that the first will eventually take the place of the second. Well, to address such an issue, we might need to understand how artificial intelligence actually works, or how computers “think”. Is it the same way as human do? How do machines learn? And how can they predict and recommend stuff?","estimatedReadingTime":"6 min read","categoryId":"machine-learning","thumbnailImageUrl":"https://miro.medium.com/max/800/1*n05beme0IT1vPNjqmcH2SA.jpeg","content":"

No “raw” data… only numbers

Before we start thinking about these questions, we should set the stage by defining our terms and methodologies. When we talk about computer “thinking” or about Machine Learning, we cannot but start with the most important part of the process, i.e., the data. Data can refer to various ways of recording and representing information. It can be visual content, such as images and videos; it can also be textual matter like this article, or any other type of information, such as audio, numbers, graphs, etc.

One thing we should know is that computers don’t “deal” with all this. Like, a computer cannot actually see an image or read a text, it only understands numbers! Some would say “but how come a computer can actually show me a video or play an audio then?”. Well, that’s simple. For a machine to “deal” with other types of data, they should be mapped to numbers! For instance, if we are trying to address the following sentence to a machine: “I am happy to be with you.” we can simply create a bank of words, or a word bag, that contains all the words used in this sentence and assign them to numbers. This word bag would then assign the words as follows: I → 1, am → 2, happy → 3, to → 4, be → 5, with → 6, you → 7. Then we represent this sentence by an array of numbers, where each number refers to a word in the word bag: [1, 2, 3, 4, 5, 6, 7]. I can then use this vocabulary to represent any other sentence, let’s say “I want you to help me with my homework”. We can clearly see that some of the words are not found in the word bag, so they are replaced with an OOV (out of vocabulary) token, which can be represented as the number 0: [1, 0, 7, 4, 0, 0, 6, 0, 0]. We can definitely increase the size of our vocabulary if we want better performance, but that’s not the point. My point is that a machine can never address “raw” data, everything has to be mapped to numbers first. Such thing is applicable for images by encoding the pixels into triplets with the value of the Red, Green, and Blue intensities, and for audio by representing the change of frequencies or other features.
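
As a toy illustration of the word-bag encoding described above (a rough sketch, not tied to any particular library):

import java.util.ArrayList;\nimport java.util.HashMap;\nimport java.util.List;\nimport java.util.Map;\n\npublic class WordBagEncoder {\n\n    private static final int OOV = 0; // out-of-vocabulary token\n\n    public static void main(String[] args) {\n        // Build the vocabulary from the first sentence: I -> 1, am -> 2, ..., you -> 7\n        Map<String, Integer> vocabulary = new HashMap<>();\n        for (String word : \"I am happy to be with you\".split(\" \")) {\n            vocabulary.putIfAbsent(word, vocabulary.size() + 1);\n        }\n\n        // Encode another sentence; words missing from the vocabulary map to the OOV token\n        List<Integer> encoded = new ArrayList<>();\n        for (String word : \"I want you to help me with my homework\".split(\" \")) {\n            encoded.add(vocabulary.getOrDefault(word, OOV));\n        }\n\n        System.out.println(encoded); // [1, 0, 7, 4, 0, 0, 6, 0, 0]\n    }\n}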
 

Brains understand context… machines spot pattern

As we can see here, machines do not interpret data the same way human brains do. What we know as Machine Learning is nothing but doing some calculations based on the input data (which is assumed to be in the form of numbers) and outputting some number(s). This number can then be used for some interpretation, e.g., if the number is between [0, 0.5[ then the image is of a cat, and if it is between [0.5, 1] then it is of a dog. The “learning” part here is that the machine undergoes tens, hundreds, and maybe thousands of rounds trying to decrease the value of the error, i.e., the times when it interpreted an image of a cat as a dog (gave it a value in the range of [0.5, 1]) and vice versa. The trick here is as if there were an ultimate labeler machine that our computer is trying to mimic. This machine can be thought of as a mathematical function that takes an array of numbers as an encoding of an image and outputs a number between 0 and 1 where the value labels the image, whether it is a cat or a dog.
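
As a toy sketch of the interpretation and error-counting step described above (with made-up scores and labels, and no actual learning involved):

public class CatDogInterpreter {\n\n    public static void main(String[] args) {\n        // Made-up model outputs in [0, 1] and the true labels for the same images\n        double[] scores = { 0.12, 0.77, 0.40, 0.91 };\n        String[] trueLabels = { \"cat\", \"dog\", \"dog\", \"dog\" };\n\n        int errors = 0;\n        for (int i = 0; i < scores.length; i++) {\n            // Interpretation rule from the article: [0, 0.5) -> cat, [0.5, 1] -> dog\n            String predicted = scores[i] < 0.5 ? \"cat\" : \"dog\";\n            if (!predicted.equals(trueLabels[i])) {\n                errors++; // this is the error count that training tries to drive down\n            }\n        }\n\n        System.out.println(\"Misclassified images: \" + errors); // 1 (the 0.40 image is actually a dog)\n    }\n}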


 

Human brains have a different approach to doing such a job. The key difference here is that they understand the context, where what computers actually do is spotting a pattern among the trained data. Once we understand this difference between the two, we can come to a conclusion that machines behave according to the data they were trained on. For instance, if you haven’t shown the machine a single image of a dolphin, there is no way on Earth it would guess it right. As a matter of fact, it would probably label it as either a cat or a dog (unless there was an option for an “unknown” label). However, on the other hand, if you show a person an image of an animal they haven’t seen before, they are more likely to guess that it doesn’t lie under any of the known animal categories in their mind.
 

No multitasking!

Another key difference when comparing machines with the human brain is the purpose of the function. A single brain can perform millions of tasks. It can solve equations, play music, write essays, compare the taste of different foods, etc. However, there is no such thing as a super machine that can do everything. For example, the model that we described earlier can only predict whether an image shows a cat or a dog. It cannot, let's say, answer questions. What I am trying to say is that we need a model for every task we can think of, since a single model can only perform one and only one job. Although this job can sometimes be subdivided into several tasks, such as recognizing emotions from speech and facial expressions, that would just make it one complex task. This is a major area where a human brain can outclass a hundred machine models.

Summary

Machines are of course superior to human brains in numerous tasks, such as doing calculations or repetitive work. That brings us to another difference between the two. Machines are faster and more efficient when programmed to do a repetitive task, while brains are smarter and more creative and controlling. Machines can label tens of cat and dog images in the time needed by a human to label a few of them. While on the other hand, a human can program a model to do the job for him!

In conclusion, I don’t think of the idea of machines and human brains competing with each other. However, I see it as a chance for collaboration and to benefit from each one’s skills. We don’t need people to do ordinary routine tasks, where we can program a machine to do it instead and in a faster and more efficient way. We can then benefit from the genius human brain in doing more creative tasks that a machine cannot do… or at least, not for now.

","createdDate":"2022-04-13T13:49:14.698","lastModifiedDate":"2022-04-13T13:49:14.698","keywords":["Machine Learning","Artifical Intelligence"]}] \ No newline at end of file diff --git a/src/test/java/me/ezzedine/mohammed/personalspace/article/infra/ArticleStorageManagerIntegrationTest.java b/src/test/java/me/ezzedine/mohammed/personalspace/article/infra/ArticleStorageManagerIntegrationTest.java index 7489cec..d46ba89 100644 --- a/src/test/java/me/ezzedine/mohammed/personalspace/article/infra/ArticleStorageManagerIntegrationTest.java +++ b/src/test/java/me/ezzedine/mohammed/personalspace/article/infra/ArticleStorageManagerIntegrationTest.java @@ -17,13 +17,13 @@ import org.springframework.boot.test.context.SpringBootTest; import org.springframework.boot.test.mock.mockito.MockBean; +import java.time.temporal.ChronoUnit; import java.util.Collections; import java.util.List; import java.util.Optional; import java.util.UUID; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.*; import static org.mockito.Mockito.when; @SpringBootTest(classes = { @@ -78,16 +78,16 @@ void the_article_should_be_persisted_in_the_storage() { assertEquals(ESTIMATED_READING_TIME, allArticles.get(0).getEstimatedReadingTime()); } -// @Test -// @DisplayName("the stored article should have the current date as the value for its creation date") -// void the_stored_article_should_have_the_current_date_as_the_value_for_its_creation_date() { -// storageManager.save(getArticle()); -// -// List allArticles = repository.findAll(); -// assertEquals(1, allArticles.size()); -// assertNotNull(allArticles.get(0).getCreatedDate()); -// assertEquals(allArticles.get(0).getCreatedDate(), allArticles.get(0).getLastModifiedDate()); -// } + @Test + @DisplayName("the stored article should have the current date as the value for its creation date") + void the_stored_article_should_have_the_current_date_as_the_value_for_its_creation_date() { + storageManager.save(getArticle()); + + List allArticles = repository.findAll(); + assertEquals(1, allArticles.size()); + assertNotNull(allArticles.get(0).getCreatedDate()); + assertEquals(allArticles.get(0).getCreatedDate(), allArticles.get(0).getLastModifiedDate()); + } @Test @DisplayName("should override any existing article with the same id") @@ -113,37 +113,37 @@ void should_override_any_existing_article_with_the_same_id() { assertEquals(ESTIMATED_READING_TIME, allArticles.get(0).getEstimatedReadingTime()); } -// @Test -// @DisplayName("should update the last modified date when overriding an existing article") -// void should_update_the_last_modified_date_when_overriding_an_existing_article() { -// ArticleEntity entity = repository.save(getRandomArticleEntity()); -// -// Article article = getArticle(); -// article.setVersion(entity.getVersion()); -// article.setCreatedDate(entity.getCreatedDate()); -// article.setLastModifiedDate(entity.getLastModifiedDate()); -// storageManager.save(article); -// -// List allArticles = repository.findAll(); -// assertEquals(1, allArticles.size()); -// assertNotEquals(entity.getLastModifiedDate(), allArticles.get(0).getLastModifiedDate()); -// } - -// @Test -// @DisplayName("should not update the created date when overriding an existing article") -// void should_not_update_the_created_date_when_overriding_an_existing_article() { -// ArticleEntity entity = repository.save(getRandomArticleEntity()); -// -// Article article = getArticle(); -// 
article.setVersion(entity.getVersion()); -// article.setCreatedDate(entity.getCreatedDate()); -// article.setLastModifiedDate(entity.getLastModifiedDate()); -// storageManager.save(article); -// -// List allArticles = repository.findAll(); -// assertEquals(1, allArticles.size()); -// assertEquals(entity.getCreatedDate().truncatedTo(ChronoUnit.SECONDS), allArticles.get(0).getCreatedDate().truncatedTo(ChronoUnit.SECONDS)); -// } + @Test + @DisplayName("should update the last modified date when overriding an existing article") + void should_update_the_last_modified_date_when_overriding_an_existing_article() { + ArticleEntity entity = repository.save(getRandomArticleEntity()); + + Article article = getArticle(); + article.setVersion(entity.getVersion()); + article.setCreatedDate(entity.getCreatedDate()); + article.setLastModifiedDate(entity.getLastModifiedDate()); + storageManager.save(article); + + List allArticles = repository.findAll(); + assertEquals(1, allArticles.size()); + assertNotEquals(entity.getLastModifiedDate(), allArticles.get(0).getLastModifiedDate()); + } + + @Test + @DisplayName("should not update the created date when overriding an existing article") + void should_not_update_the_created_date_when_overriding_an_existing_article() { + ArticleEntity entity = repository.save(getRandomArticleEntity()); + + Article article = getArticle(); + article.setVersion(entity.getVersion()); + article.setCreatedDate(entity.getCreatedDate()); + article.setLastModifiedDate(entity.getLastModifiedDate()); + storageManager.save(article); + + List allArticles = repository.findAll(); + assertEquals(1, allArticles.size()); + assertEquals(entity.getCreatedDate().truncatedTo(ChronoUnit.SECONDS), allArticles.get(0).getCreatedDate().truncatedTo(ChronoUnit.SECONDS)); + } private static ArticleEntity getRandomArticleEntity() { return ArticleEntity.builder().id(ID).title(UUID.randomUUID().toString()).categoryId(UUID.randomUUID().toString()) @@ -168,24 +168,24 @@ void it_should_return_an_empty_optional_if_the_article_does_not_exist() { assertTrue(optionalArticle.isEmpty()); } -// @Test -// @DisplayName("it should return the article when it exists") -// void it_should_return_the_article_when_it_exists() { -// repository.save(getEntity()); -// Optional
optionalArticle = storageManager.fetch(ID); -// assertTrue(optionalArticle.isPresent()); -// assertEquals(ID, optionalArticle.get().getId()); -// assertEquals(DESCRIPTION, optionalArticle.get().getDescription()); -// assertEquals(CONTENT, optionalArticle.get().getContent()); -// assertEquals(TITLE, optionalArticle.get().getTitle()); -// assertEquals(CATEGORY_ID, optionalArticle.get().getCategoryId()); -// assertEquals(THUMBNAIL_IMAGE_URL, optionalArticle.get().getThumbnailImageUrl()); -// assertEquals(List.of(KEYWORD), optionalArticle.get().getKeywords()); -// assertNotNull(optionalArticle.get().getCreatedDate()); -// assertNotNull(optionalArticle.get().getLastModifiedDate()); -// assertEquals(HIDDEN, optionalArticle.get().isHidden()); -// assertEquals(ESTIMATED_READING_TIME, optionalArticle.get().getEstimatedReadingTime()); -// } + @Test + @DisplayName("it should return the article when it exists") + void it_should_return_the_article_when_it_exists() { + repository.save(getEntity()); + Optional
optionalArticle = storageManager.fetch(ID); + assertTrue(optionalArticle.isPresent()); + assertEquals(ID, optionalArticle.get().getId()); + assertEquals(DESCRIPTION, optionalArticle.get().getDescription()); + assertEquals(CONTENT, optionalArticle.get().getContent()); + assertEquals(TITLE, optionalArticle.get().getTitle()); + assertEquals(CATEGORY_ID, optionalArticle.get().getCategoryId()); + assertEquals(THUMBNAIL_IMAGE_URL, optionalArticle.get().getThumbnailImageUrl()); + assertEquals(List.of(KEYWORD), optionalArticle.get().getKeywords()); + assertNotNull(optionalArticle.get().getCreatedDate()); + assertNotNull(optionalArticle.get().getLastModifiedDate()); + assertEquals(HIDDEN, optionalArticle.get().isHidden()); + assertEquals(ESTIMATED_READING_TIME, optionalArticle.get().getEstimatedReadingTime()); + } } @Nested @@ -205,25 +205,25 @@ void should_return_an_empty_list_when_no_articles_exist() { assertTrue(articles.getItems().isEmpty()); } -// @Test -// @DisplayName("should return all the articles when they are less than the size of the first page") -// void should_return_all_the_articles_when_they_are_less_than_the_size_of_the_first_page() { -// repository.save(getEntity()); -// PaginationCriteria paginationCriteria = PaginationCriteria.builder().startingPageIndex(0).maximumPageSize(100).build(); -// Page
articles = storageManager.fetchAll(ArticlesFetchCriteria.builder().paginationCriteria(paginationCriteria).build()); -// assertEquals(1, articles.getTotalSize()); -// assertEquals(ID, articles.getItems().get(0).getId()); -// assertEquals(TITLE, articles.getItems().get(0).getTitle()); -// assertEquals(DESCRIPTION, articles.getItems().get(0).getDescription()); -// assertEquals(CONTENT, articles.getItems().get(0).getContent()); -// assertEquals(CATEGORY_ID, articles.getItems().get(0).getCategoryId()); -// assertEquals(THUMBNAIL_IMAGE_URL, articles.getItems().get(0).getThumbnailImageUrl()); -// assertEquals(List.of(KEYWORD), articles.getItems().get(0).getKeywords()); -// assertNotNull(articles.getItems().get(0).getCreatedDate()); -// assertNotNull(articles.getItems().get(0).getLastModifiedDate()); -// assertEquals(HIDDEN, articles.getItems().get(0).isHidden()); -// assertEquals(ESTIMATED_READING_TIME, articles.getItems().get(0).getEstimatedReadingTime()); -// } + @Test + @DisplayName("should return all the articles when they are less than the size of the first page") + void should_return_all_the_articles_when_they_are_less_than_the_size_of_the_first_page() { + repository.save(getEntity()); + PaginationCriteria paginationCriteria = PaginationCriteria.builder().startingPageIndex(0).maximumPageSize(100).build(); + Page
articles = storageManager.fetchAll(ArticlesFetchCriteria.builder().paginationCriteria(paginationCriteria).build()); + assertEquals(1, articles.getTotalSize()); + assertEquals(ID, articles.getItems().get(0).getId()); + assertEquals(TITLE, articles.getItems().get(0).getTitle()); + assertEquals(DESCRIPTION, articles.getItems().get(0).getDescription()); + assertEquals(CONTENT, articles.getItems().get(0).getContent()); + assertEquals(CATEGORY_ID, articles.getItems().get(0).getCategoryId()); + assertEquals(THUMBNAIL_IMAGE_URL, articles.getItems().get(0).getThumbnailImageUrl()); + assertEquals(List.of(KEYWORD), articles.getItems().get(0).getKeywords()); + assertNotNull(articles.getItems().get(0).getCreatedDate()); + assertNotNull(articles.getItems().get(0).getLastModifiedDate()); + assertEquals(HIDDEN, articles.getItems().get(0).isHidden()); + assertEquals(ESTIMATED_READING_TIME, articles.getItems().get(0).getEstimatedReadingTime()); + } @Test @DisplayName("should return the first page only when there are more articles than the mentioned size")