diff --git a/src/index.css b/src/index.css index ef00dc4..38fe593 100644 --- a/src/index.css +++ b/src/index.css @@ -116,6 +116,16 @@ input[type="range"]:focus { width: 60vw; } +.responsive-img-width { + width: 60vw; +} + +@media (min-width:768px) { + .responsive-img-width { + width: 35vw; + } +} + .sobel-image-width { width: 60vw; } diff --git a/src/landingPage/ModuleIntro.tsx b/src/landingPage/ModuleIntro.tsx index 0da9a16..8c6d4c1 100644 --- a/src/landingPage/ModuleIntro.tsx +++ b/src/landingPage/ModuleIntro.tsx @@ -1,8 +1,13 @@ import React from 'react'; -import mod8 from '../media/modules/mod8.png'; -import mod9 from '../media/modules/mod9.png'; -import mod10 from '../media/modules/mod10.png'; -import mod11 from '../media/modules/mod11.png'; +import mod8 from '../media/modules/previews/mod8.png'; +import mod9 from '../media/modules/previews/mod9.png'; +import mod10 from '../media/modules/previews/mod10.png'; +import mod11 from '../media/modules/previews/mod11.png'; +import kernel from '../media/modules/previews/kernel.png'; +import gausBlur from '../media/modules/previews/gausBlur.png'; +import gabor from '../media/modules/previews/gabor.png'; +import sobel from '../media/modules/previews/sobel.png'; +import hog from '../media/modules/previews/hog.png'; function getCoverImage(imgName: string) { switch (imgName) { @@ -14,6 +19,16 @@ function getCoverImage(imgName: string) { return mod10; case 'mod11': return mod11; + case 'kernel': + return kernel; + case 'gausBlur': + return gausBlur; + case 'gabor': + return gabor; + case 'sobel': + return sobel; + case 'hog': + return hog; default: return undefined; } diff --git a/src/media/modules/computerVision/combinedSobelKernelExampleDark.png b/src/media/modules/computerVision/combinedSobelKernelExampleDark.png new file mode 100644 index 0000000..b6f242a Binary files /dev/null and b/src/media/modules/computerVision/combinedSobelKernelExampleDark.png differ diff --git a/src/media/modules/computerVision/combinedSobelKernelExampleLight.png b/src/media/modules/computerVision/combinedSobelKernelExampleLight.png new file mode 100644 index 0000000..ec041ae Binary files /dev/null and b/src/media/modules/computerVision/combinedSobelKernelExampleLight.png differ diff --git a/src/media/modules/computerVision/vertSobelExampleDark.png b/src/media/modules/computerVision/vertSobelExampleDark.png new file mode 100644 index 0000000..ec149d0 Binary files /dev/null and b/src/media/modules/computerVision/vertSobelExampleDark.png differ diff --git a/src/media/modules/computerVision/vertSobelExampleLight.png b/src/media/modules/computerVision/vertSobelExampleLight.png new file mode 100644 index 0000000..4c6b27e Binary files /dev/null and b/src/media/modules/computerVision/vertSobelExampleLight.png differ diff --git a/src/media/modules/module_descriptions.json b/src/media/modules/module_descriptions.json index 428d147..6fc9ec7 100644 --- a/src/media/modules/module_descriptions.json +++ b/src/media/modules/module_descriptions.json @@ -12,6 +12,66 @@ "path": "computer-vision", "active": true }, + { + "number": 8.1, + "title": "Intro to Images and Kernels", + "dropdownTitle": "Images and Kernels", + "body": "To begin, we discuss how an image is represented as data so it can be used in computer vision algorithms. 
We also introduce the kernel, and how it relates to the task of extracting features from images.", + "bgColor": "bg-darkblue", + "textColor": "text-white", + "margin": "ml-64", + "imgSrc": "kernel", + "path": "images-and-kernels", + "active": true + }, + { + "number": 8.2, + "title": "Image Blurring: The Gaussian Blur", + "dropdownTitle": "Gaussian Blur", + "body": "Here we explore the Gaussian Blur, a popular computer vision technique that blurs an image. It uses a kernel with values obtained from the Gaussian function.", + "bgColor": "bg-offwhite", + "textColor": "text-darkblue", + "margin": "mr-64", + "imgSrc": "gausBlur", + "path": "gaussian-blur", + "active": true + }, + { + "number": 8.3, + "title": "Directional Filtering: The Gabor Filter", + "dropdownTitle": "Gabor Filter", + "body": "This section explores another computer vision technique called the Gabor Filter, which extracts portions of an image that follow a specific directional pattern.", + "bgColor": "bg-lightblue", + "textColor": "text-darkblue", + "margin": "ml-64", + "imgSrc": "gabor", + "path": "gabor-filter", + "active": true + }, + { + "number": 8.4, + "title": "Edge Detection: The Sobel Filter", + "dropdownTitle": "Sobel Filter", + "body": "In order to detect an object within an image, we search for edges in the image to give us a sense of the shape of the object. We use the Sobel Filter to detect edges in a particular direction in an image.", + "bgColor": "bg-darkblue", + "textColor": "text-white", + "margin": "mr-64", + "imgSrc": "sobel", + "path": "sobel-filter", + "active": true + }, + { + "number": 8.5, + "title": "Histogram of (Oriented) Gradients", + "dropdownTitle": "Histogram of Gradients", + "body": "While a Sobel Filter can give us information about edges in a single direction, we need to know about the edges in all directions simultaneously to fully understand the shape of the image. The Histogram of Gradients allows us to collect the edges in all different directions.", + "bgColor": "bg-lightblue", + "textColor": "text-darkblue", + "margin": "ml-64", + "imgSrc": "hog", + "path": "histogram-of-gradients", + "active": true + }, { "number": 9, "title": "Recognition as a Classification Problem", @@ -19,7 +79,7 @@ "body": "Once features have been detected and, perhaps combined into “higher-order features”, machine vision algorithms are often tasked to perform classification–that is, to determine what an array of features is “of”. Does this bundle of features in an image indicate the presence of a dog? Does that bundle of features over there indicate a cat? A set of features can be thought of as a location in a “state space”. Nearby locations in state space tend to correspond to similar objects or scenes. Features of many dogs of the same breed may form a “cluster” in state space. We will explore clusters of features in some simple state spaces and begin to ask how different “naturally occurring” clusters (corresponding to dogs and cats in our example) might be detected by a machine.", "bgColor": "bg-darkblue", "textColor": "text-white", - "margin": "ml-64", + "margin": "mr-64", "imgSrc": "mod9", "path": "classification", "active": true @@ -31,7 +91,7 @@ "body": "Today’s remarkable advances in the power of artificial neural networks leverage foundations laid in 1943 by the McCulloch and Pitts neuron model and Rosenblatt’s 1958 software implementation of the first perceptron.
We will examine the architecture and functions of the simplest trainable pattern classifier.This classifier is so simple, in fact, that it cannot effectively separate clusters that are not “linearly separable”. Two clusters in a two-dimensional state space that cannot be separated by a single straight line are examples of non-linearly separable classes. What does the perceptron do when confronted with a data-set that is not linearly separable?", "bgColor": "bg-offwhite", "textColor": "text-darkblue", - "margin": "mr-64", + "margin": "ml-64", "imgSrc": "mod10", "path": "perceptrons", "active": false @@ -43,7 +103,7 @@ "body": "The problem of trying to separate clusters that are not linearly separable has been known for more than half a century, and many variations of multi-layer perceptrons continue to be developed to address and overcome limitations of earlier approaches. Multi-layered artificial neural networks are often said to have “hidden layers” where features are iteratively “weighted” in various ways until a desired classification performance is reached. We will consider a simple example of how hidden layers help to solve the “exclusive-OR problem” and explore the “credit assignment problem.” How does an algorithm know which weights to change and by how much, in order to promote learning for improved classification by a neural network?", "bgColor": "bg-darkblue", "textColor": "text-white", - "margin": "ml-64", + "margin": "mr-64", "imgSrc": "mod11", "path": "neural-nets", "active": false diff --git a/src/media/modules/previews/gabor.png b/src/media/modules/previews/gabor.png new file mode 100644 index 0000000..3eea63d Binary files /dev/null and b/src/media/modules/previews/gabor.png differ diff --git a/src/media/modules/previews/gausBlur.png b/src/media/modules/previews/gausBlur.png new file mode 100644 index 0000000..c505a86 Binary files /dev/null and b/src/media/modules/previews/gausBlur.png differ diff --git a/src/media/modules/previews/hog.png b/src/media/modules/previews/hog.png new file mode 100644 index 0000000..cb7b8e5 Binary files /dev/null and b/src/media/modules/previews/hog.png differ diff --git a/src/media/modules/previews/kernel.png b/src/media/modules/previews/kernel.png new file mode 100644 index 0000000..324f135 Binary files /dev/null and b/src/media/modules/previews/kernel.png differ diff --git a/src/media/modules/mod10.png b/src/media/modules/previews/mod10.png similarity index 100% rename from src/media/modules/mod10.png rename to src/media/modules/previews/mod10.png diff --git a/src/media/modules/mod11.png b/src/media/modules/previews/mod11.png similarity index 100% rename from src/media/modules/mod11.png rename to src/media/modules/previews/mod11.png diff --git a/src/media/modules/mod8.png b/src/media/modules/previews/mod8.png similarity index 100% rename from src/media/modules/mod8.png rename to src/media/modules/previews/mod8.png diff --git a/src/media/modules/mod9.png b/src/media/modules/previews/mod9.png similarity index 100% rename from src/media/modules/mod9.png rename to src/media/modules/previews/mod9.png diff --git a/src/media/modules/previews/sobel.png b/src/media/modules/previews/sobel.png new file mode 100644 index 0000000..f9e0ad9 Binary files /dev/null and b/src/media/modules/previews/sobel.png differ diff --git a/src/media/modules/text/computer-vision-intro.json b/src/media/modules/text/computer-vision-intro.json new file mode 100644 index 0000000..c736c89 --- /dev/null +++ b/src/media/modules/text/computer-vision-intro.json @@ -0,0 +1,49 @@ +{ + 
"title": "Intro to Computer Vision: Images and Kernels", + "sections": [ + { + "title": "What is Computer Vision?", + "colorScheme": "dark", + "subsections": [ + { + "title": "", + "body": "In order to extract valuable information from an image, computers must be able to process images and detect features in those images based on some criteria. As humans, we do this all the time - if we see an animal, we can identify it as a cat because it has fur, perked up ears, a small nose, and bright eyes. We extract features/traits of the objects in our view that allow us to identify those objects.", + "imgSrc": "blank" + }, + { + "title": "", + "body": "Computer vision is conceptually not all that different! To identify features in an image, a small window called a *kernel* slides across the image and at every step, makes a calculation.", + "imgSrc": "blank" + }, + { + "title": "", + "body": "Depending on what trait the kernel is supposed to capture, that calculation reveals whether that trait is present at the current location of the window or not. These calculations are then joined into a new image that represents the result of sliding that window across the original image.", + "imgSrc": "blank" + } + ], + "demoComp": "" + }, + { + "title": "What is a Kernel?", + "colorScheme": "light", + "subsections": [ + { + "title": "Subsection 1", + "body": "Let's take a step back and define what an image is first. An image is made up of individual pixels which each have a value that represents the color, and those pixels are organized into a grid of values to give us the resulting image.", + "imgSrc": "animation1" + }, + { + "title": "Subsection 2", + "body": "Kernels are not all the different from images in that sense - we specify a much smaller grid of numbers that, based on their value and organization are capable of extracting a \"feature\" from the part of the image the kernel is currently sitting on. A kernel might find the edges of an object in an image, sharpen the details of an image, or smooth the image out.", + "imgSrc": "animation2" + }, + { + "title": "Subsection 3", + "body": "The way this feature extraction occurs is by making a 'window' the size of the kernel around a pixel in the original image. We then multiply the original image's pixel values by the numbers in the kernel, and sum them all up. We compute this sum for all pixels by sliding this 'window' across every pixel in the image and repeating the math at every step. As we slide across the image, we stitch together the new values after applying the kernel to create a brand new image!", + "imgSrc": "animation3" + } + ], + "demoComp": "" + } + ] +} \ No newline at end of file diff --git a/src/media/modules/text/gabor-filter.json b/src/media/modules/text/gabor-filter.json new file mode 100644 index 0000000..380c66e --- /dev/null +++ b/src/media/modules/text/gabor-filter.json @@ -0,0 +1,27 @@ +{ + "title": "Gabor Filter", + "sections": [ + { + "title": "", + "colorScheme": "light", + "subsections": [ + { + "title": "Subsection 1", + "body": "The Gabor Filter is a bit more complicated in its definition but the features that it aims to extract are just as intuitive as the Gaussian Blur. Instead of wanting to extract the overall structure of the image, now we are interested in extracting portions of an image that follow a specific directional pattern. 
The canonical example of this is wanting to extract stripes on a zebra that are in a particular direction.", + "imgSrc": "blank" + }, + { + "title": "Subsection 2", + "body": "The gist of how the filter works is that we can detect certain orientations by modifying our angle θ and specifying the size of the window we slide across the image. We can also specify a frequency, or magnitude, for the directionality, which assesses how strong the directionality is as a filter.", + "imgSrc": "blank" + }, + { + "title": "Subsection 3", + "body": "The resulting image contains only the windows of the image that contain features that follow the angle specified by θ. This can be highly effective when scanning images that contain text, where we only want to retain the text in the image. The reason for this is that images generally have lower directional content as they are smoother relative to text.", + "imgSrc": "blank" + } + ], + "demoComp": "GaborDemo" + } + ] +} diff --git a/src/media/modules/text/gaussian-blur.json b/src/media/modules/text/gaussian-blur.json new file mode 100644 index 0000000..36fd53f --- /dev/null +++ b/src/media/modules/text/gaussian-blur.json @@ -0,0 +1,54 @@ +{ + "title": "Gaussian Blur", + "sections": [ + { + "title": "Gaussian Blur", + "colorScheme": "dark", + "subsections": [ + { + "title": "Subsection 1", + "body": "As its name suggests, the Gaussian blur is used for blurring. This is useful to make sure that computer vision algorithms don't focus too much on the details of an image. The same way we don't recognize a cell phone by its serial number, an image processing system should focus on the big picture when trying to recognize objects.", + "imgSrc": "blank" + }, + { + "title": "Subsection 2", + "body": "The definition of a Gaussian Blur Filter comes from the famous Gaussian/Normal/Bell curve as though it were projected into 3D. The values of the kernel are based on the density of (area under) the bell curve at the location of the current kernel entry. For example, the center of the kernel is the center of the bell curve where the density is the highest; therefore, the largest value of the kernel should be at the center. Conversely, the values at the edges of the kernel should be the smallest.", + "imgSrc": "blank" + }, + { + "title": "Subsection 3", + "body": "We can control the change in the magnitude of the values from the center to the outsides of the kernel (spread of the bell curve) by modifying the standard deviation of the distribution using the σ (sigma) parameter. The last thing we want to ensure about the kernel is that its values sum to approximately 1. If we didn't do this, the image would end up brighter or darker than intended (changing the image's overall \"energy\"). We can do this by summing up all of the values and dividing every entry by the sum.", + "imgSrc": "blank" + }, + { + "title": "Subsection 4", + "body": "This kernel defocuses the middle pixel of the window by averaging its value with its neighbors. Applied across the whole image, this retains the picture's overall structure without the sharp details. This achieves our original goal of making a blurred image.", + "imgSrc": "blank" + } + ], + "demoComp": "GaussianBlurDemo" + }, + { + "title": "Difference of Gaussians", + "colorScheme": "light", + "subsections": [ + { + "title": "Subsection 1", + "body": "The motivation behind the Difference of Gaussians technique is to detect edges in an image.
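Editor's sketch (not part of this diff): the Gaussian Blur subsections above describe building the kernel from the bell curve, controlling its spread with σ, and dividing every entry by the total so the values sum to approximately 1. A hedged TypeScript sketch of that construction, with the hypothetical name gaussianKernel and an assumed odd kernel size (3, 5, 7, ...):

// Build a size x size Gaussian kernel whose entries follow the 2D bell curve
// and are normalized so they sum to ~1, preserving the image's overall brightness.
function gaussianKernel(size: number, sigma: number): number[][] {
  const half = Math.floor(size / 2);
  const kernel: number[][] = [];
  let total = 0;
  for (let y = -half; y <= half; y += 1) {
    const row: number[] = [];
    for (let x = -half; x <= half; x += 1) {
      // 2D Gaussian density, largest at the centre (x = y = 0) and
      // falling off toward the edges, with sigma controlling the spread
      const value = Math.exp(-(x * x + y * y) / (2 * sigma * sigma));
      row.push(value);
      total += value;
    }
    kernel.push(row);
  }
  // Normalize: divide every entry by the sum of all entries
  return kernel.map(row => row.map(v => v / total));
}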
This is particularly useful for segmenting objects from one another or creating a coloring book from pictures on your phone!", + "imgSrc": "blank" + }, + { + "title": "Subsection 2", + "body": "A Difference of Gaussians kernel builds directly upon what we have already covered in Gaussian Blurring. We take two images processed using Gaussian Blur filters with different standard deviations (σ, sigma) and subtract them from each other. We want the image that we are subtracting from to have been processed with a Gaussian Blur filter that has a smaller standard deviation (less spread) than the image that we are subtracting.", + "imgSrc": "blank" + }, + { + "title": "Subsection 3", + "body": "You can think of this technique as extracting what changes most when you blur. If you think about it, the sharpest parts of the image, that is, the edges of objects, undergo the most change when blurring. When finding the difference between two images at different sigma values, the sharpest parts of the image, or the edges, are going to be the only thing left.", + "imgSrc": "blank" + } + ], + "demoComp": "DiffOfGaussian" + } + ] +} \ No newline at end of file diff --git a/src/media/modules/text/histogram-of-gradients.json b/src/media/modules/text/histogram-of-gradients.json new file mode 100644 index 0000000..9abbc9b --- /dev/null +++ b/src/media/modules/text/histogram-of-gradients.json @@ -0,0 +1,27 @@ +{ + "title": "Histogram of Gradients", + "sections": [ + { + "title": "Histogram of Gradients", + "colorScheme": "dark", + "subsections": [ + { + "title": "Subsection 1", + "body": "", + "imgSrc": "blank" + }, + { + "title": "Subsection 2", + "body": "", + "imgSrc": "blank" + }, + { + "title": "Subsection 3", + "body": "", + "imgSrc": "blank" + } + ], + "demoComp": "" + } + ] +} \ No newline at end of file diff --git a/src/media/modules/text/sobel-filter.json b/src/media/modules/text/sobel-filter.json new file mode 100644 index 0000000..509bcd3 --- /dev/null +++ b/src/media/modules/text/sobel-filter.json @@ -0,0 +1,27 @@ +{ + "title": "Sobel Filter", + "sections": [ + { + "title": "", + "colorScheme": "light", + "subsections": [ + { + "title": "Subsection 1", + "body": "The Sobel Filter is popularly used in image detection algorithms to extract edges from an image. The filter uses a 3x3 kernel that detects the image gradient, a directional change in color in the image, for a given direction. The example kernel detects a change in color between the left and right sides of the current pixel, which indicates a vertical edge. ", + "imgSrc": "sobelKernelDark" + }, + { + "title": "Subsection 2", + "body": "Note that the above kernel only detects a dark-to-light gradient (from left to right); to capture all vertical edges, we need to also use a \"mirror-image\" kernel that has positive values in the left column and negative values in the right (shown here).", + "imgSrc": "sobelKernelLight" + }, + { + "title": "Subsection 3", + "body": "To get a more accurate understanding of the shape of the image, we need to extract the image gradient in the four primary edge directions: vertical, horizontal, diagonal up (45 degrees), and diagonal down (-45 degrees). The demo below calculates the images resulting from filtering by the eight Sobel kernels (4 directions, 2 filters per direction for dark-to-light and light-to-dark) for a stop sign image.
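Editor's sketch (not part of this diff): the standard 3x3 vertical Sobel kernels described in sobel-filter.json above, written out in TypeScript. The variable names are hypothetical, and the exact sign convention in the module's kernel images may differ; the horizontal and diagonal kernels used by the demo are rotations of these same values.

// Responds most strongly where pixels to the right of the centre are brighter
// than pixels to the left, i.e. a dark-to-light (left to right) vertical edge.
const sobelVerticalDarkToLight: number[][] = [
  [-1, 0, 1],
  [-2, 0, 2],
  [-1, 0, 1],
];

// The "mirror-image" kernel from Subsection 2: positive values in the left
// column and negative values in the right, catching light-to-dark vertical edges.
const sobelVerticalLightToDark: number[][] = sobelVerticalDarkToLight.map(
  row => [...row].reverse(),
);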
Note that you can select other images to filter from the box below.", + "imgSrc": "blank" + } + ], + "demoComp": "SobelFilterDemo" + } + ] +} \ No newline at end of file diff --git a/src/modulePage/ModulePage.tsx b/src/modulePage/ModulePage.tsx index 21821fb..a13ee37 100644 --- a/src/modulePage/ModulePage.tsx +++ b/src/modulePage/ModulePage.tsx @@ -5,6 +5,11 @@ import module8 from '../media/modules/module_8.json'; import module9 from '../media/modules/module_9.json'; import module10 from '../media/modules/module_10.json'; import module11 from '../media/modules/module_11.json'; +import cvIntroModule from '../media/modules/text/computer-vision-intro.json'; +import gaborModule from '../media/modules/text/gabor-filter.json'; +import gaussianModule from '../media/modules/text/gaussian-blur.json'; +import sobelModule from '../media/modules/text/sobel-filter.json'; +// import hogModule from '../media/modules/text/histogram-of-gradients.json'; export interface ModuleSubsectionType { title: string; @@ -20,15 +25,23 @@ interface ModuleSectionType { } interface Module { - number: number; title: string; sections: ModuleSectionType[]; } -const modules: Record = { +// eslint-disable-next-line +type moduleName = 'computer-vision' | 'images-and-kernels' | 'gaussian-blur' | 'gabor-filter' | 'sobel-filter' | 'histogram-of-gradients' | 'classification' | 'perceptron' | 'neural-nets'; +const modules: Record = { 'computer-vision': module8, - classification: module9, - perceptrons: module10, + 'images-and-kernels': cvIntroModule, + 'gaussian-blur': gaussianModule, + 'gabor-filter': gaborModule, + 'sobel-filter': sobelModule, + 'histogram-of-gradients': null, + // eslint-disable-next-line + 'classification': module9, + // eslint-disable-next-line + 'perceptron': module10, 'neural-nets': module11, }; @@ -38,18 +51,15 @@ const modules: Record = { */ export default function ModulePage( - props: RouteComponentProps<{ module: string }>, + props: RouteComponentProps<{ module: moduleName }>, ) { - const { - match: { - params: { module }, - }, - } = props; - const curModule = modules[module]; + // eslint-disable-next-line + const module = props.match.params.module; + const curModule: Module | null = modules[module]; if (!curModule) { return ( -
+

This module does not exist.

Return to home
diff --git a/src/modulePage/ModuleSection.tsx b/src/modulePage/ModuleSection.tsx index 2cec87b..c1d7ee9 100644 --- a/src/modulePage/ModuleSection.tsx +++ b/src/modulePage/ModuleSection.tsx @@ -1,28 +1,35 @@ -import React from 'react'; -import SobelFilterDemo from '../modules/computerVision/sobelFilter/SobelFilterDemo'; -import GaussianBlurDemo from '../modules/computerVision/gaussianBlur/GaussianBlurDemo'; -import GaborDemo from '../modules/computerVision/gaborFilter/gaborFilter'; -import DiffOfGaussianDemo from '../modules/computerVision/diffofgaussian/DiffOfGaussian'; -import HaarWaveletDemo from '../modules/computerVision/haarWavelet/HaarWaveletDemo'; -import { ImageSelectableDemo } from '../modules/computerVision/imageSelector/ImageSelectableDemo'; +/* eslint-disable */ +import React from "react"; +import SobelFilterDemo from "../modules/computerVision/sobelFilter/SobelFilterDemo"; +import GaussianBlurDemo from "../modules/computerVision/gaussianBlur/GaussianBlurDemo"; +import GaborDemo from "../modules/computerVision/gaborFilter/gaborFilter"; +import DiffOfGaussianDemo from "../modules/computerVision/diffofgaussian/DiffOfGaussian"; +import HaarWaveletDemo from "../modules/computerVision/haarWavelet/HaarWaveletDemo"; +import { ImageSelectableDemo } from "../modules/computerVision/imageSelector/ImageSelectableDemo"; import PCADemo, { RawDataTable, SelectableAxisChart, StaticAxisChart, config as pcaConfig, -} from '../modules/stateSpaces/pca/PCA'; +} from "../modules/stateSpaces/pca/PCA"; import KMeans, { KMeansStepExample, InteractiveClusteringExample, -} from '../modules/stateSpaces/kmeans'; -import blank from '../media/modules/blank.png'; -import animation1 from '../media/modules/computerVision/animation-1.gif'; -import animation2 from '../media/modules/computerVision/animation-2.gif'; -import animation3 from '../media/modules/computerVision/animation-3.gif'; +} from "../modules/stateSpaces/kmeans"; +import blank from "../media/modules/blank.png"; +import animation1 from "../media/modules/computerVision/animation-1.gif"; +import animation2 from "../media/modules/computerVision/animation-2.gif"; +import animation3 from "../media/modules/computerVision/animation-3.gif"; +import combinedSobelKernelExampleLight from "../media/modules/computerVision/combinedSobelKernelExampleLight.png"; +import combinedSobelKernelExampleDark from "../media/modules/computerVision/combinedSobelKernelExampleDark.png"; +import vertSobelExampleLight from "../media/modules/computerVision/sobelKernels/vertical_lighttodark.png"; +import vertSobelExampleDark from "../media/modules/computerVision/sobelKernels/vertical_darktolight.png"; +// import lightVertSobelExampleLight from '../media/modules/computerVision/vertSobelExampleLight.png'; +// import lightVertSobelExampleDark from '../media/modules/computerVision/vertSobelExampleDark.png'; const lorem = - 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'; + "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. 
Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."; interface ColorScheme { bgColor: string; @@ -40,14 +47,22 @@ interface ModuleSubsection { function GetImage(imgName: string) { switch (imgName) { - case 'blank': + case "blank": return blank; - case 'animation1': + case "animation1": return animation1; - case 'animation2': + case "animation2": return animation2; - case 'animation3': + case "animation3": return animation3; + case "sobelKernelLight": + return vertSobelExampleLight; + case "sobelKernelDark": + return vertSobelExampleDark; + case "combinedSobelKernelLight": + return combinedSobelKernelExampleLight; + case "combinedSobelKernelDark": + return combinedSobelKernelExampleDark; default: } } @@ -56,7 +71,7 @@ function getDemo(comp: string, scheme: ColorScheme) { const demoArgs = { labelColor: scheme.titleColor }; switch (comp) { - case 'GaussianBlurDemo': + case "GaussianBlurDemo": return ( ); - case 'GaborDemo': + case "GaborDemo": return ( ); - case 'DiffOfGaussian': + case "DiffOfGaussian": return ( ); - case 'HaarWaveletDemo': + case "HaarWaveletDemo": return ( ); - case 'SobelFilterDemo': + case "SobelFilterDemo": return ( ); - case 'PCADemo': + case "PCADemo": return ; - case 'RawDataTable': + case "RawDataTable": return ; - case 'StaticAxisChart': + case "StaticAxisChart": return ( ); - case 'SelectableAxisChart': + case "SelectableAxisChart": return ( ); - case 'PCASelectableAxisChart': + case "PCASelectableAxisChart": return ( ); - case 'InteractiveKMeans': + case "InteractiveKMeans": return (
); - case 'StepKMeans': + case "StepKMeans": return (
); - case 'KMeans': + case "KMeans": return ; default: return
; @@ -171,20 +186,20 @@ const ModuleSection: React.FC = ({ demoComp, }) => { const scheme = - colorScheme === 'dark' + colorScheme === "dark" ? { - bgColor: 'bg-moduleDarkBlue', - titleColor: 'text-modulePaleBlue', - headingColor: 'text-moduleTeal', - bodyColor: 'text-moduleOffwhite', - labelColorHex: '#CBD9F2', + bgColor: "bg-moduleDarkBlue", + titleColor: "text-modulePaleBlue", + headingColor: "text-moduleTeal", + bodyColor: "text-moduleOffwhite", + labelColorHex: "#CBD9F2", } : { - bgColor: 'bg-modulePaleBlue', - titleColor: 'text-moduleNavy', - headingColor: 'text-moduleDarkBlue', - bodyColor: 'text-moduleNavy', - labelColorHex: '#394D73', + bgColor: "bg-modulePaleBlue", + titleColor: "text-moduleNavy", + headingColor: "text-moduleDarkBlue", + bodyColor: "text-moduleNavy", + labelColorHex: "#394D73", }; return ( @@ -200,34 +215,42 @@ const ModuleSection: React.FC = ({ {sections.map((section, index) => (
-

- {section.body || lorem} -

+ {section.body?.includes("") ? ( +

+

+ ) : ( +

+ {section.body || lorem} +

+ )}
diff --git a/src/modules/computerVision/common/InteractiveFilter.tsx b/src/modules/computerVision/common/InteractiveFilter.tsx index 7ea0710..a8ec30c 100644 --- a/src/modules/computerVision/common/InteractiveFilter.tsx +++ b/src/modules/computerVision/common/InteractiveFilter.tsx @@ -65,13 +65,13 @@ const InteractiveFilter: React.FC = ({
input