diff --git a/Neural Networks/.DS_Store b/Neural Networks/.DS_Store new file mode 100644 index 00000000..5f15b6c4 Binary files /dev/null and b/Neural Networks/.DS_Store differ diff --git a/Neural Networks/Drowsiness detection/Drowsiness Detection(1).ipynb b/Neural Networks/Drowsiness detection/Drowsiness Detection(1).ipynb new file mode 100644 index 00000000..317701b1 --- /dev/null +++ b/Neural Networks/Drowsiness detection/Drowsiness Detection(1).ipynb @@ -0,0 +1,861 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "ffaddfd9", + "metadata": {}, + "source": [ + "# Install and import Dependencied" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "97c19bbd", + "metadata": { + "collapsed": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Requirement already satisfied: torch in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (2.2.2)\n", + "Requirement already satisfied: torchvision in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (0.17.2)\n", + "Requirement already satisfied: torchaudio in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (2.2.2)\n", + "Requirement already satisfied: filelock in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from torch) (3.9.0)\n", + "Requirement already satisfied: typing-extensions>=4.8.0 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from torch) (4.9.0)\n", + "Requirement already satisfied: sympy in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from torch) (1.11.1)\n", + "Requirement already satisfied: networkx in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from torch) (2.8.4)\n", + "Requirement already satisfied: jinja2 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from torch) (3.1.2)\n", + "Requirement already satisfied: fsspec in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from torch) (2023.4.0)\n", + "Requirement already satisfied: numpy in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from torchvision) (1.24.3)\n", + "Requirement already satisfied: pillow!=8.3.*,>=5.3.0 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from torchvision) (10.3.0)\n", + "Requirement already satisfied: MarkupSafe>=2.0 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from jinja2->torch) (2.1.1)\n", + "Requirement already satisfied: mpmath>=0.19 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from sympy->torch) (1.2.1)\n" + ] + } + ], + "source": [ + "!pip install torch torchvision torchaudio" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "4d3b462d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "fatal: destination path 'yolov5' already exists and is not an empty directory.\r\n" + ] + } + ], + "source": [ + "!git clone https://github.com/ultralytics/yolov5" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "bcd117c6", + "metadata": { + "collapsed": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Requirement already satisfied: gitpython>=3.1.30 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from -r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 5)) (3.1.41)\n", + "Requirement already satisfied: matplotlib>=3.3 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from -r 
/Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 6)) (3.7.1)\n", + "Requirement already satisfied: numpy>=1.23.5 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from -r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 7)) (1.24.3)\n", + "Requirement already satisfied: opencv-python>=4.1.1 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from -r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 8)) (4.9.0.80)\n", + "Requirement already satisfied: Pillow>=10.0.1 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from -r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 9)) (10.3.0)\n", + "Requirement already satisfied: psutil in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from -r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 10)) (5.9.0)\n", + "Requirement already satisfied: PyYAML>=5.3.1 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from -r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 11)) (6.0)\n", + "Requirement already satisfied: requests>=2.23.0 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from -r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 12)) (2.29.0)\n", + "Requirement already satisfied: scipy>=1.4.1 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from -r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 13)) (1.10.1)\n", + "Requirement already satisfied: thop>=0.1.1 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from -r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 14)) (0.1.1.post2209072238)\n", + "Requirement already satisfied: torch>=1.8.0 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from -r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 15)) (2.2.2)\n", + "Requirement already satisfied: torchvision>=0.9.0 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from -r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 16)) (0.17.2)\n", + "Requirement already satisfied: tqdm>=4.64.0 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from -r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 17)) (4.65.0)\n", + "Requirement already satisfied: ultralytics>=8.0.232 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from -r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 18)) (8.1.44)\n", + "Requirement already satisfied: pandas>=1.1.4 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from -r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 27)) (1.5.3)\n", + "Requirement already satisfied: seaborn>=0.11.0 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from -r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 28)) (0.12.2)\n", + "Requirement already satisfied: setuptools>=65.5.1 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from -r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 42)) (67.8.0)\n", + "Requirement already satisfied: gitdb<5,>=4.0.1 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from gitpython>=3.1.30->-r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 5)) 
(4.0.11)\n", + "Requirement already satisfied: contourpy>=1.0.1 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from matplotlib>=3.3->-r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 6)) (1.0.5)\n", + "Requirement already satisfied: cycler>=0.10 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from matplotlib>=3.3->-r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 6)) (0.11.0)\n", + "Requirement already satisfied: fonttools>=4.22.0 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from matplotlib>=3.3->-r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 6)) (4.25.0)\n", + "Requirement already satisfied: kiwisolver>=1.0.1 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from matplotlib>=3.3->-r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 6)) (1.4.4)\n", + "Requirement already satisfied: packaging>=20.0 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from matplotlib>=3.3->-r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 6)) (23.0)\n", + "Requirement already satisfied: pyparsing>=2.3.1 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from matplotlib>=3.3->-r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 6)) (3.0.9)\n", + "Requirement already satisfied: python-dateutil>=2.7 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from matplotlib>=3.3->-r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 6)) (2.8.2)\n", + "Requirement already satisfied: charset-normalizer<4,>=2 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from requests>=2.23.0->-r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 12)) (2.0.4)\n", + "Requirement already satisfied: idna<4,>=2.5 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from requests>=2.23.0->-r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 12)) (3.4)\n", + "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from requests>=2.23.0->-r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 12)) (1.26.16)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from requests>=2.23.0->-r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 12)) (2023.7.22)\n", + "Requirement already satisfied: filelock in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from torch>=1.8.0->-r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 15)) (3.9.0)\n", + "Requirement already satisfied: typing-extensions>=4.8.0 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from torch>=1.8.0->-r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 15)) (4.9.0)\n", + "Requirement already satisfied: sympy in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from torch>=1.8.0->-r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 15)) (1.11.1)\n", + "Requirement already satisfied: networkx in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from torch>=1.8.0->-r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 15)) (2.8.4)\n", + "Requirement already satisfied: jinja2 in 
/Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from torch>=1.8.0->-r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 15)) (3.1.2)\n", + "Requirement already satisfied: fsspec in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from torch>=1.8.0->-r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 15)) (2023.4.0)\n", + "Requirement already satisfied: py-cpuinfo in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from ultralytics>=8.0.232->-r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 18)) (8.0.0)\n", + "Requirement already satisfied: pytz>=2020.1 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from pandas>=1.1.4->-r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 27)) (2022.7)\n", + "Requirement already satisfied: smmap<6,>=3.0.1 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from gitdb<5,>=4.0.1->gitpython>=3.1.30->-r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 5)) (5.0.1)\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Requirement already satisfied: six>=1.5 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from python-dateutil>=2.7->matplotlib>=3.3->-r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 6)) (1.16.0)\n", + "Requirement already satisfied: MarkupSafe>=2.0 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from jinja2->torch>=1.8.0->-r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 15)) (2.1.1)\n", + "Requirement already satisfied: mpmath>=0.19 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from sympy->torch>=1.8.0->-r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt (line 15)) (1.2.1)\n" + ] + } + ], + "source": [ + "!cd yolov5 & pip install -r /Users/praveenchalla/Desktop/machine_Learning/yolov5/requirements.txt" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "469014ef", + "metadata": { + "collapsed": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Requirement already satisfied: GitPython in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (3.1.41)\n", + "Requirement already satisfied: gitdb<5,>=4.0.1 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from GitPython) (4.0.11)\n", + "Requirement already satisfied: smmap<6,>=3.0.1 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from gitdb<5,>=4.0.1->GitPython) (5.0.1)\n", + "Note: you may need to restart the kernel to use updated packages.\n" + ] + } + ], + "source": [ + "pip install --timeout=60 GitPython" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "0577ef0c", + "metadata": {}, + "outputs": [], + "source": [ + "import torch \n", + "from matplotlib import pyplot as plt\n", + "import numpy as np\n", + "import cv2" + ] + }, + { + "cell_type": "markdown", + "id": "01eb892c", + "metadata": {}, + "source": [ + "# Load Model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f78a7ee4", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "fb476212", + "metadata": {}, + "outputs": [], + "source": [ + "rm -rf /Users/praveenchalla/.cache/torch/hub" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "ce921740", + "metadata": { + "scrolled": true 
+ }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Downloading: \"https://github.com/ultralytics/yolov5/zipball/master\" to /Users/praveenchalla/.cache/torch/hub/master.zip\n", + "YOLOv5 🚀 2024-4-14 Python-3.11.3 torch-2.2.2 CPU\n", + "\n", + "Fusing layers... \n", + "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients, 16.4 GFLOPs\n", + "Adding AutoShape... \n" + ] + } + ], + "source": [ + "model =torch.hub.load('ultralytics/yolov5','yolov5s')" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "id": "7eb8a6c2", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "AutoShape(\n", + " (model): DetectMultiBackend(\n", + " (model): DetectionModel(\n", + " (model): Sequential(\n", + " (0): Conv(\n", + " (conv): Conv2d(3, 32, kernel_size=(6, 6), stride=(2, 2), padding=(2, 2))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (1): Conv(\n", + " (conv): Conv2d(32, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (2): C3(\n", + " (cv1): Conv(\n", + " (conv): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (cv2): Conv(\n", + " (conv): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (cv3): Conv(\n", + " (conv): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (m): Sequential(\n", + " (0): Bottleneck(\n", + " (cv1): Conv(\n", + " (conv): Conv2d(32, 32, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (cv2): Conv(\n", + " (conv): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " )\n", + " )\n", + " )\n", + " (3): Conv(\n", + " (conv): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (4): C3(\n", + " (cv1): Conv(\n", + " (conv): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (cv2): Conv(\n", + " (conv): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (cv3): Conv(\n", + " (conv): Conv2d(128, 128, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (m): Sequential(\n", + " (0): Bottleneck(\n", + " (cv1): Conv(\n", + " (conv): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (cv2): Conv(\n", + " (conv): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " )\n", + " (1): Bottleneck(\n", + " (cv1): Conv(\n", + " (conv): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (cv2): Conv(\n", + " (conv): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " )\n", + " )\n", + " )\n", + " (5): Conv(\n", + " (conv): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (6): C3(\n", + " (cv1): Conv(\n", + " (conv): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (cv2): Conv(\n", + " (conv): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (cv3): Conv(\n", + " (conv): Conv2d(256, 256, kernel_size=(1, 1), 
stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (m): Sequential(\n", + " (0): Bottleneck(\n", + " (cv1): Conv(\n", + " (conv): Conv2d(128, 128, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (cv2): Conv(\n", + " (conv): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " )\n", + " (1): Bottleneck(\n", + " (cv1): Conv(\n", + " (conv): Conv2d(128, 128, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (cv2): Conv(\n", + " (conv): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " )\n", + " (2): Bottleneck(\n", + " (cv1): Conv(\n", + " (conv): Conv2d(128, 128, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (cv2): Conv(\n", + " (conv): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " )\n", + " )\n", + " )\n", + " (7): Conv(\n", + " (conv): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (8): C3(\n", + " (cv1): Conv(\n", + " (conv): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (cv2): Conv(\n", + " (conv): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (cv3): Conv(\n", + " (conv): Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (m): Sequential(\n", + " (0): Bottleneck(\n", + " (cv1): Conv(\n", + " (conv): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (cv2): Conv(\n", + " (conv): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " )\n", + " )\n", + " )\n", + " (9): SPPF(\n", + " (cv1): Conv(\n", + " (conv): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (cv2): Conv(\n", + " (conv): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (m): MaxPool2d(kernel_size=5, stride=1, padding=2, dilation=1, ceil_mode=False)\n", + " )\n", + " (10): Conv(\n", + " (conv): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (11): Upsample(scale_factor=2.0, mode='nearest')\n", + " (12): Concat()\n", + " (13): C3(\n", + " (cv1): Conv(\n", + " (conv): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (cv2): Conv(\n", + " (conv): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (cv3): Conv(\n", + " (conv): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (m): Sequential(\n", + " (0): Bottleneck(\n", + " (cv1): Conv(\n", + " (conv): Conv2d(128, 128, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (cv2): Conv(\n", + " (conv): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " )\n", + " )\n", + " )\n", + " (14): Conv(\n", + " (conv): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (15): Upsample(scale_factor=2.0, mode='nearest')\n", + " 
(16): Concat()\n", + " (17): C3(\n", + " (cv1): Conv(\n", + " (conv): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (cv2): Conv(\n", + " (conv): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (cv3): Conv(\n", + " (conv): Conv2d(128, 128, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (m): Sequential(\n", + " (0): Bottleneck(\n", + " (cv1): Conv(\n", + " (conv): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (cv2): Conv(\n", + " (conv): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " )\n", + " )\n", + " )\n", + " (18): Conv(\n", + " (conv): Conv2d(128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (19): Concat()\n", + " (20): C3(\n", + " (cv1): Conv(\n", + " (conv): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (cv2): Conv(\n", + " (conv): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (cv3): Conv(\n", + " (conv): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (m): Sequential(\n", + " (0): Bottleneck(\n", + " (cv1): Conv(\n", + " (conv): Conv2d(128, 128, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (cv2): Conv(\n", + " (conv): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " )\n", + " )\n", + " )\n", + " (21): Conv(\n", + " (conv): Conv2d(256, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (22): Concat()\n", + " (23): C3(\n", + " (cv1): Conv(\n", + " (conv): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (cv2): Conv(\n", + " (conv): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (cv3): Conv(\n", + " (conv): Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (m): Sequential(\n", + " (0): Bottleneck(\n", + " (cv1): Conv(\n", + " (conv): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " (cv2): Conv(\n", + " (conv): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", + " (act): SiLU(inplace=True)\n", + " )\n", + " )\n", + " )\n", + " )\n", + " (24): Detect(\n", + " (m): ModuleList(\n", + " (0): Conv2d(128, 255, kernel_size=(1, 1), stride=(1, 1))\n", + " (1): Conv2d(256, 255, kernel_size=(1, 1), stride=(1, 1))\n", + " (2): Conv2d(512, 255, kernel_size=(1, 1), stride=(1, 1))\n", + " )\n", + " )\n", + " )\n", + " )\n", + " )\n", + ")" + ] + }, + "execution_count": 25, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "model" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "id": "d0a69a37", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Requirement already satisfied: torch in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (2.2.2)\n", + "Requirement already satisfied: torchvision in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (0.17.2)\n", + "Requirement already 
satisfied: filelock in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from torch) (3.9.0)\n", + "Requirement already satisfied: typing-extensions>=4.8.0 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from torch) (4.9.0)\n", + "Requirement already satisfied: sympy in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from torch) (1.11.1)\n", + "Requirement already satisfied: networkx in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from torch) (2.8.4)\n", + "Requirement already satisfied: jinja2 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from torch) (3.1.2)\n", + "Requirement already satisfied: fsspec in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from torch) (2023.4.0)\n", + "Requirement already satisfied: numpy in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from torchvision) (1.24.3)\n", + "Requirement already satisfied: pillow!=8.3.*,>=5.3.0 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from torchvision) (10.3.0)\n", + "Requirement already satisfied: MarkupSafe>=2.0 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from jinja2->torch) (2.1.1)\n", + "Requirement already satisfied: mpmath>=0.19 in /Users/praveenchalla/anaconda3/lib/python3.11/site-packages (from sympy->torch) (1.2.1)\n", + "Note: you may need to restart the kernel to use updated packages.\n" + ] + } + ], + "source": [ + "pip install --upgrade torch torchvision" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "136d7cc8", + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "pip install --upgrade ultralytics" + ] + }, + { + "cell_type": "markdown", + "id": "14be5e35", + "metadata": {}, + "source": [ + "\n", + "# Make detection with images checking using a image but we can add any vedio in this link it will detect" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "id": "200e7d34", + "metadata": {}, + "outputs": [], + "source": [ + "img='https://newsmeter.in/h-upload/2023/07/26/351034-whatsapp-image-2023-07-26-at-122903-pm.webp'" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "id": "93ef8e0f", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "image 1/1: 1272x1280 11 persons, 22 cars, 7 motorcycles, 6 buss, 4 trucks, 1 umbrella\n", + "Speed: 876.0ms pre-process, 545.2ms inference, 11.8ms NMS per image at shape (1, 3, 640, 640)\n" + ] + } + ], + "source": [ + "results=model(img)\n", + "results.print()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4b03131b", + "metadata": {}, + "outputs": [], + "source": [ + "%matplotlib inline\n", + "plt.imshow(np.squeeze(results.render()))\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a955178e", + "metadata": {}, + "outputs": [], + "source": [ + "plt.imshow(np.squeeze(results.render()))" + ] + }, + { + "cell_type": "markdown", + "id": "3f5dd0dd", + "metadata": {}, + "source": [ + "# Real time detection\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "38df6781", + "metadata": {}, + "outputs": [], + "source": [ + "import cv2\n", + "cap=cv2.VideoCapture(0)\n", + "while cap.isOpened():\n", + " ret,frame = cap.read()\n", + " \n", + " #Make detections\n", + " results=model(frame)\n", + " \n", + " cv2.imshow('YOLO',np.squeeze(results.render()))\n", + " \n", + " \n", + " if cv2.waitKey(10) & 0xFF == ord('q'):\n", + " break\n", + 
"cap.release()\n", + "cv2.destroyAllWindows()" + ] + }, + { + "cell_type": "markdown", + "id": "a8fee930", + "metadata": {}, + "source": [ + "# Train from scrach and Test " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ff83e1b7", + "metadata": {}, + "outputs": [], + "source": [ + "import uuid #Unique identify\n", + "import os \n", + "import time #taking time for each bit\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a7b0adac", + "metadata": {}, + "outputs": [], + "source": [ + "IMAGES_PATH=os.path.join('data','images') #/data/images\n", + "labels=['awake','drowsy'] #two classes\n", + "number_imgs=20" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a471c356", + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "cap = cv2.VideoCapture(0)\n", + "#loop through labels\n", + "for label in labels:\n", + " print('Collecting images for {}'.format(label))\n", + " time.sleep(5)\n", + " \n", + " #loop through images\n", + " for img_num in range(number_imgs):\n", + " print('Collecting images for {}, image number {}'.format(label,img_num))\n", + " \n", + " #webcam feed\n", + " ret, frame=cap.read()\n", + " \n", + " #Naming out image path\n", + " imgname = os.path.join(IMAGES_PATH,label+','+str(uuid.uuid1())+\".jpg\")\n", + " \n", + " #writes out image to file\n", + " cv2.imwrite(imgname,frame)\n", + " \n", + " #render to the screen\n", + " cv2.imshow('Image Collection',frame)\n", + " \n", + " # 2 second delay between captures\n", + " time.sleep(3)\n", + " \n", + " if cv2.waitKey(10) & 0xFF == ord('q'):\n", + " break\n", + "cap.release()\n", + "cv2.destroyAllWindows()\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0c235b43", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "377eb5a5", + "metadata": {}, + "outputs": [], + "source": [ + "print(os.path.join(IMAGES_PATH,labels[0]+'.'+str(uuid.uuid1()) + '.jpg'))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "800d8629", + "metadata": {}, + "outputs": [], + "source": [ + "labels[0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bbb59cae", + "metadata": {}, + "outputs": [], + "source": [ + "for label in labels:\n", + " print('Collecting images for {}'.format(label))\n", + " for img_num in range(number_imgs):\n", + " print('Collecting images for {}, image number {}'.format(label,img_num))\n", + " imgname = os.path.join(IMAGES_PATH,label+'.'+str(uuid.uuid1())+\".jpg\")\n", + " print(imgname)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d1caf6c8", + "metadata": {}, + "outputs": [], + "source": [ + "!git clone https://github.com/HumanSignal/labelImg" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "68c2b418", + "metadata": {}, + "outputs": [], + "source": [ + "!pip install pyqt5 lxml --upgrade\n", + "!cd labelImg && pyrcc5 -o libs/resources.py resources.qrc" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0c78597b", + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "!cd yolov5 && python train.py --img 320 --batch 16 --epochs 5 --data dataset.yaml --weights yolov5s.pt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "24d2c865", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e5646cd8", + "metadata": {}, + "outputs": [], + "source": [] 
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.11.3"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/Neural Networks/Drowsiness detection/Drowsiness_README.md b/Neural Networks/Drowsiness detection/Drowsiness_README.md
new file mode 100644
index 00000000..c5fa3ad3
--- /dev/null
+++ b/Neural Networks/Drowsiness detection/Drowsiness_README.md
@@ -0,0 +1,91 @@
+# Drowsiness-Detection-with-YoloV5
+
+This repository contains a drowsiness detector built on YOLOv5. The base repo is available [here](https://github.com/ultralytics/yolov5).
+
+
+## 1. Preparing a Custom Data Set (Including Real-Time Data)
+
+A custom data set was prepared for this project. Videos were recorded of 21 different people in scenarios that could occur while driving, covering three categories: normal, yawning and head position. Various lighting conditions and the use of glasses were taken into account. A total of 63 videos were collected and labeled according to the chosen training method.
+
+
+## 2. Labeling Phase
+
+The LabelImg program can be used for labeling in object-detection projects. It supports the PASCAL VOC, YOLO and CreateML formats. Since training is done with YOLOv5 in this project, the data is labeled in txt format. Turkish characters should not be used in labels.
+
+### 2.1 LabelImg Installation for Windows:
+
+**Get the repo**
+
+ `git clone https://github.com/tzutalin/labelImg.git`
+
+**After creating and activating a virtual or Anaconda environment, run the following commands in cmd:**
+
+`pip install PyQt5`
+
+`pip install lxml`
+
+`pyrcc5 -o libs/resources.py resources.qrc`
+
+**The command below opens LabelImg. For subsequent uses, only this last step is needed.**
+
+`python labelImg.py`
+
+**Note: After installing LabelImg, the "predefined_classes.txt" file in the data folder can be emptied, or the classes to be used can be written into it. This prevents problems that may occur during the labeling phase.**
+
+![predefined_classes](https://user-images.githubusercontent.com/73580507/159132999-55ba4f21-48c3-40d6-a70d-9a3431de3bfb.png)
+
+**There are 1975 labeled images in total for model training, split 80% train and 20% test. Four classes were used: "normal", "drowsy", "drowsy#2" and "yawning". "drowsy" covers eyes closed with the head upright, while "drowsy#2" covers the head dropping forward. Labeling these two states separately keeps the model from making the wrong decision.**
+
+
+## 3. Training Phase
+
+**YOLOv5 was chosen because it produces high-accuracy results even with little data, and because its nano model takes up little space and can be deployed on embedded devices. The data folder structure should be as follows:**
+
+![data_format](https://user-images.githubusercontent.com/73580507/159135000-635c7787-81eb-4c70-a2b6-47c0f54bdcc8.png)
+
+
+### 3.1 Editing YAML files
+
+**The data.yaml file holds the number and names of the labels and the file paths of the train and test data. This file should be located in the yolov5/data folder (a minimal example is sketched at the end of this README).**
+
+![data_yaml](https://user-images.githubusercontent.com/73580507/159135929-206f18ec-e1fd-4281-bb69-d24bc425d3cd.png)
+
+**The `nc` value in the yolov5n_drowsy.yaml file was set to 4, the number of classes. This file should be located in the yolov5/models folder.**
+
+### 3.2 Training of the Model
+
+```
+python train.py --resume --imgsz 640 --batch 16 --epochs 600 --data data/data.yaml --cfg models/yolov5n_drowsy.yaml --weights weights/yolov5n.pt --name drowsy_result --cache --device 0
+```
+**Training was stopped at 173 epochs, where the model already performed well.**
+
+
+## 4. Drowsiness Detection with the Trained Model
+
+```
+python drowsy_detect.py --weights runs/train/drowsy_result/weights/best.pt --source data/drowsy_training/test/images --hide-conf
+```
+
+**See [drowsy_training_with_yolov5.ipynb](https://github.com/suhedaras/Drowsiness-Detection-with-YoloV5/blob/main/drowsy_training_with_yolov5.ipynb) for the full training run.**
+
+
+## 5. Results
+
+### 5.1 Approach 1
+
+
+ ![app1](https://user-images.githubusercontent.com/73580507/159136371-943b6761-0a8f-44af-a471-ff0b78d18514.gif)
+
+![frame02-1072](https://user-images.githubusercontent.com/73580507/159136614-4a2a4509-e354-4df2-9455-cb01f339e317.jpg)![frame02-2132](https://user-images.githubusercontent.com/73580507/159136623-c5deb6c9-9e69-4166-a8c3-828a30b157c0.jpg)
+
+
+### 5.2 Approach 2
+
+
+ ![nhtu](https://user-images.githubusercontent.com/73580507/159136464-5e057cc1-fc47-4dc0-be63-1bccd94028c6.gif)
+
+![frame13-1120](https://user-images.githubusercontent.com/73580507/159136568-20e91a0a-8b6f-4e97-8ec5-dbad7bb624bc.jpg)![frame13-2006](https://user-images.githubusercontent.com/73580507/159136580-4707b37d-47e2-4063-90f3-18d1cb500b05.jpg)
+
+
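+## 6. Example data.yaml (Sketch)
+
+**As referenced in Section 3.1, the sketch below shows one possible data.yaml for this project. The class count and names follow Sections 2 and 3.1; the train/val paths are placeholders inferred from the `--source` path used in Section 4 and should be adjusted to your own data folder layout.**
+
+```
+# data.yaml (sketch only): the paths below are assumed, not taken from the repo
+train: data/drowsy_training/train/images   # placeholder: location of the training images
+val: data/drowsy_training/test/images      # placeholder: matches the --source path in Section 4
+
+nc: 4                                      # number of classes (Section 3.1)
+names: ['normal', 'drowsy', 'drowsy#2', 'yawning']   # order must match the label indices used when annotating
+```
+
+**Depending on the YOLOv5 version, relative paths may be resolved against an optional `path:` key or the repository root, so absolute paths are a safe fallback if training cannot find the images.**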