diff --git a/docs/src/pages/post/_assets/download-jan.jpg b/docs/src/pages/post/_assets/download-jan.jpg
new file mode 100644
index 0000000000..f799260c7f
Binary files /dev/null and b/docs/src/pages/post/_assets/download-jan.jpg differ
diff --git a/docs/src/pages/post/_assets/hugging-face-jan-model-download.jpg b/docs/src/pages/post/_assets/hugging-face-jan-model-download.jpg
new file mode 100644
index 0000000000..c6cfa8ea5a
Binary files /dev/null and b/docs/src/pages/post/_assets/hugging-face-jan-model-download.jpg differ
diff --git a/docs/src/pages/post/_assets/jan-hf-model-download.jpg b/docs/src/pages/post/_assets/jan-hf-model-download.jpg
new file mode 100644
index 0000000000..929acf2ffe
Binary files /dev/null and b/docs/src/pages/post/_assets/jan-hf-model-download.jpg differ
diff --git a/docs/src/pages/post/_assets/jan-hub-deepseek-r1.jpg b/docs/src/pages/post/_assets/jan-hub-deepseek-r1.jpg
new file mode 100644
index 0000000000..12c0c66404
Binary files /dev/null and b/docs/src/pages/post/_assets/jan-hub-deepseek-r1.jpg differ
diff --git a/docs/src/pages/post/_assets/jan-hub-download-deepseek-r1-2.jpg b/docs/src/pages/post/_assets/jan-hub-download-deepseek-r1-2.jpg
new file mode 100644
index 0000000000..24be4bd25d
Binary files /dev/null and b/docs/src/pages/post/_assets/jan-hub-download-deepseek-r1-2.jpg differ
diff --git a/docs/src/pages/post/_assets/jan-hub-download-deepseek-r1.jpg b/docs/src/pages/post/_assets/jan-hub-download-deepseek-r1.jpg
new file mode 100644
index 0000000000..83d9ab3701
Binary files /dev/null and b/docs/src/pages/post/_assets/jan-hub-download-deepseek-r1.jpg differ
diff --git a/docs/src/pages/post/_assets/jan-library-deepseek-r1.jpg b/docs/src/pages/post/_assets/jan-library-deepseek-r1.jpg
new file mode 100644
index 0000000000..6a54082dc1
Binary files /dev/null and b/docs/src/pages/post/_assets/jan-library-deepseek-r1.jpg differ
diff --git a/docs/src/pages/post/_assets/jan-local-ai.jpg b/docs/src/pages/post/_assets/jan-local-ai.jpg
new file mode 100644
index 0000000000..2c8c145ff5
Binary files /dev/null and b/docs/src/pages/post/_assets/jan-local-ai.jpg differ
diff --git a/docs/src/pages/post/_assets/jan-model-download.jpg b/docs/src/pages/post/_assets/jan-model-download.jpg
new file mode 100644
index 0000000000..7e949403d0
Binary files /dev/null and b/docs/src/pages/post/_assets/jan-model-download.jpg differ
diff --git a/docs/src/pages/post/_assets/jan-runs-deepseek-r1-distills.jpg b/docs/src/pages/post/_assets/jan-runs-deepseek-r1-distills.jpg
new file mode 100644
index 0000000000..02ce847f4f
Binary files /dev/null and b/docs/src/pages/post/_assets/jan-runs-deepseek-r1-distills.jpg differ
diff --git a/docs/src/pages/post/_assets/jan-system-prompt-deepseek-r1.jpg b/docs/src/pages/post/_assets/jan-system-prompt-deepseek-r1.jpg
new file mode 100644
index 0000000000..f79e71af06
Binary files /dev/null and b/docs/src/pages/post/_assets/jan-system-prompt-deepseek-r1.jpg differ
diff --git a/docs/src/pages/post/_assets/local-ai-model-parameters.jpg b/docs/src/pages/post/_assets/local-ai-model-parameters.jpg
new file mode 100644
index 0000000000..1d26fc4a5c
Binary files /dev/null and b/docs/src/pages/post/_assets/local-ai-model-parameters.jpg differ
diff --git a/docs/src/pages/post/_assets/open-source-ai-quantization.jpg b/docs/src/pages/post/_assets/open-source-ai-quantization.jpg
new file mode 100644
index 0000000000..fe605c3cdc
Binary files /dev/null and b/docs/src/pages/post/_assets/open-source-ai-quantization.jpg differ
diff --git a/docs/src/pages/post/_assets/run-deepseek-r1-locally-in-jan.jpg b/docs/src/pages/post/_assets/run-deepseek-r1-locally-in-jan.jpg
new file mode 100644
index 0000000000..aa69805856
Binary files /dev/null and b/docs/src/pages/post/_assets/run-deepseek-r1-locally-in-jan.jpg differ
diff --git a/docs/src/pages/post/deepseek-r1-locally.mdx b/docs/src/pages/post/deepseek-r1-locally.mdx
new file mode 100644
index 0000000000..32ed5816a6
--- /dev/null
+++ b/docs/src/pages/post/deepseek-r1-locally.mdx
@@ -0,0 +1,115 @@
+---
+title: "Beginner's Guide: Run DeepSeek R1 Locally"
+description: "A straightforward guide to running DeepSeek R1 locally for enhanced privacy, regardless of your background."
+tags: DeepSeek, R1, local AI, Jan, GGUF, Qwen, Llama
+categories: guides
+date: 2025-01-31
+ogImage: assets/run-deepseek-r1-locally-in-jan.jpg
+---
+
+import { Callout } from 'nextra/components'
+import CTABlog from '@/components/Blog/CTA'
+
+# Beginner's Guide: Run DeepSeek R1 Locally
+
+![image](./_assets/run-deepseek-r1-locally-in-jan.jpg)
+
+DeepSeek R1 brings state-of-the-art AI capabilities to your local machine. With distilled versions optimized for different hardware configurations, you can run this powerful model directly on your laptop or desktop computer. This guide walks you through setting it up in Jan step by step, regardless of your background.
+
+Why use an optimized version?
+- Efficient performance on standard hardware
+- Faster download and initialization
+- Optimized storage requirements
+- Maintains most of the original model's capabilities
+
+## Quick Steps at a Glance
+1. Download [Jan](https://jan.ai/)
+2. Select a model version suited to your hardware
+3. Configure optimal settings
+4. Set up the prompt template & begin interacting
+
+Let's walk through each step with detailed instructions.
+
+## Step 1: Download Jan
+[Jan](https://jan.ai/) is an open-source application that enables you to run AI models locally. It's available for Windows, Mac, and Linux, with a streamlined setup process.
+
+![image](./_assets/download-jan.jpg)
+
+1. Visit [jan.ai](https://jan.ai)
+2. Download the appropriate version for your operating system
+3. Follow the standard installation process
+
+## Step 2: Choose Your DeepSeek R1 Version
+DeepSeek R1 is available in different architectures and sizes. Here's how to select the right version for your system.
+
+
+To check your system's VRAM:
+- Windows: Press Windows + R, type "dxdiag", press Enter, click "Display" tab
+- Mac: Apple menu > About This Mac > More Info > Graphics/Displays
+- Linux: Open Terminal, run `nvidia-smi` (NVIDIA GPUs) or `lspci -v | grep -i vga`
+
+
+Understanding the versions:
+- **Qwen architecture:** Optimized for efficiency while maintaining high performance
+- **Llama architecture:** Known for robust performance and reliability
+- **Original vs Distilled:** The original R1 is a 671B-parameter model; the distilled versions are smaller Qwen and Llama models fine-tuned on R1's outputs, preserving much of its reasoning ability at a fraction of the resource cost
+
+| Version | Model Link | Required VRAM |
+|---------|------------|---------------|
+| Qwen 1.5B | [DeepSeek-R1-Distill-Qwen-1.5B-GGUF](https://huggingface.co/bartowski/DeepSeek-R1-Distill-Qwen-1.5B-GGUF) | 6GB+ |
+| Qwen 7B | [DeepSeek-R1-Distill-Qwen-7B-GGUF](https://huggingface.co/bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF) | 8GB+ |
+| Llama 8B | [DeepSeek-R1-Distill-Llama-8B-GGUF](https://huggingface.co/unsloth/DeepSeek-R1-Distill-Llama-8B-GGUF) | 8GB+ |
+| Qwen 14B | [DeepSeek-R1-Distill-Qwen-14B-GGUF](https://huggingface.co/bartowski/DeepSeek-R1-Distill-Qwen-14B-GGUF) | 16GB+ |
+| Qwen 32B | [DeepSeek-R1-Distill-Qwen-32B-GGUF](https://huggingface.co/bartowski/DeepSeek-R1-Distill-Qwen-32B-GGUF) | 16GB+ |
+| Llama 70B | [DeepSeek-R1-Distill-Llama-70B-GGUF](https://huggingface.co/unsloth/DeepSeek-R1-Distill-Llama-70B-GGUF) | 48GB+ |
+
+
+Recommendations based on your hardware:
+- 6GB VRAM: The 1.5B version offers efficient performance
+- 8GB VRAM: 7B or 8B versions provide a balanced experience
+- 16GB+ VRAM: Access to larger models for enhanced capabilities
+
+
+To download your chosen model:
+1. Launch Jan and navigate to Jan Hub using the sidebar
+2. Locate the "Add Model" section:
+
+![image](./_assets/jan-library-deepseek-r1.jpg)
+
+3. Input the model link in the designated field:
+
+![image](./_assets/jan-hub-deepseek-r1.jpg)
+
+## Step 3: Configure Model Settings
+When configuring your model, you'll encounter quantization options:
+
+
+Quantization balances performance and resource usage:
+- **Q4:** Recommended for most users - optimal balance of efficiency and quality
+- **Q8:** Higher precision but requires more computational resources
+
+
+## Step 4: Configure Prompt Template
+Final configuration step:
+
+1. Access Model Settings via the sidebar
+2. Locate the Prompt Template configuration
+3. Use this specific format:
+
+
+```
+<|User|>{prompt}<|Assistant|>
+```
+
+
+This template marks where your message ends and the model's reply begins, so responses come back clean.
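+
+For example, if you ask "Why is the sky blue?", the text sent to the model looks like this:
+
+```
+<|User|>Why is the sky blue?<|Assistant|>
+```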
+
+You're now ready to interact with DeepSeek R1:
+
+![image](./_assets/jan-runs-deepseek-r1-distills.jpg)
+
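+Prefer to use DeepSeek R1 from your own code? Jan can also expose a local, OpenAI-compatible API server (enable it in Jan's settings). A minimal sketch in Python, assuming the server runs on Jan's default port 1337 - the model id below is an example, so copy the exact id shown in Jan:
+
+```python
+import requests
+
+resp = requests.post(
+    "http://localhost:1337/v1/chat/completions",
+    json={
+        "model": "deepseek-r1-distill-qwen-7b",  # example id - use the one Jan shows
+        "messages": [{"role": "user", "content": "Explain VRAM in one sentence."}],
+    },
+    timeout=120,
+)
+print(resp.json()["choices"][0]["message"]["content"])
+```
+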
+## Need Assistance?
+
+
+Join our [Discord community](https://discord.gg/Exe46xPMbK) for support and discussions about running AI models locally.
+
diff --git a/docs/src/pages/post/run-ai-models-locally.mdx b/docs/src/pages/post/run-ai-models-locally.mdx
new file mode 100644
index 0000000000..c3570f63a3
--- /dev/null
+++ b/docs/src/pages/post/run-ai-models-locally.mdx
@@ -0,0 +1,200 @@
+---
+title: "How to Run AI Models Locally: A Beginner's Guide"
+description: "A straightforward guide to running AI models locally on your computer, regardless of your background."
+tags: AI, local models, Jan, GGUF, privacy, local AI
+categories: guides
+date: 2025-01-31
+ogImage: assets/jan-local-ai.jpg
+---
+
+import { Callout } from 'nextra/components'
+import CTABlog from '@/components/Blog/CTA'
+
+# How to Run AI Models Locally: A Beginner's Guide
+
+DeepSeek R1 is one of the best open-source models on the market right now, and the best part is that you can run different versions of it on your own laptop. This guide shows you how to run open-source AI models like DeepSeek, Llama, or Mistral locally on your computer, regardless of your background.
+
+## Quick steps:
+1. Download [Jan](https://jan.ai)
+2. Pick a recommended model
+3. Start chatting
+
+Read [Quickstart](https://jan.ai/docs/quickstart) to get started. For more details, keep reading.
+
+![Run AI models locally with Jan](./_assets/jan-local-ai.jpg)
+*Jan is a desktop app for running AI models locally. Download [Jan](https://jan.ai)*
+
+
+Benefits of running AI locally:
+- **Privacy:** Your data stays on your computer
+- **No internet needed:** Use AI even offline
+- **No limits:** Chat as much as you want
+- **Full control:** Choose which AI models to use
+
+
+## How to run AI models locally as a beginner
+
+[Jan](https://jan.ai) makes it straightforward to run AI models. Download Jan and you're ready to go - the setup process is streamlined and automated.
+
+
+What you can do with Jan:
+- Download AI models with one click
+- Everything is set up automatically
+- Find models that work on your computer
+
+
+## Understanding Local AI models
+
+Think of AI models like engines powering applications - some are compact and efficient, while others are more powerful but require more resources. Let's understand two important terms you'll see often: parameters and quantization.
+
+### What's a "Parameter"?
+
+When looking at AI models, you'll see names like "Llama-2-7B" or "Mistral-7B". Here's what that means:
+
+![AI model parameters explained](./_assets/local-ai-model-parameters.jpg)
+*Model sizes: Bigger models = Better results + More resources*
+
+- The "B" means "billion parameters" (like brain cells)
+- More parameters = smarter AI but needs a faster computer
+- Fewer parameters = simpler AI but works on most computers
+
+
+Which size to choose?
+- **7B models:** Best for most people - they run on most computers
+- **13B models:** Smarter but need a good graphics card
+- **70B models:** Very smart but need a powerful computer
+
+
+### What's Quantization?
+
+Quantization is a technique that optimizes AI models to run efficiently on your computer. Think of it like an engine tuning process that balances performance with resource usage:
+
+![AI model quantization explained](./_assets/open-source-ai-quantization.jpg)
+*Quantization: Balance between size and quality*
+
+Simple guide:
+- **Q4:** Most efficient choice - good balance of speed and quality
+- **Q6:** Enhanced quality with moderate resource usage
+- **Q8:** Highest quality but requires more computational power
+
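+To make this concrete, here's a toy sketch in Python of what 4-bit quantization does to a handful of weights. It's an illustration only - real GGUF quantization (llama.cpp's Q4 variants) works on blocks of weights with per-block scales and cleverer rounding:
+
+```python
+import numpy as np
+
+weights = np.array([0.82, -1.31, 0.05, 2.4, -0.66, 1.07], dtype=np.float32)
+
+scale = np.abs(weights).max() / 7                      # int4 stores -8..7
+q = np.clip(np.round(weights / scale), -8, 7).astype(np.int8)
+restored = q.astype(np.float32) * scale                # what the model computes with
+
+print("original:", weights)
+print("4-bit   :", q)          # 6 weights fit in ~3 bytes (plus one scale) vs 24
+print("restored:", restored)   # close to the original, slightly lossy
+```
+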
+
+Understanding model versions:
+- **Original models:** Full-sized versions with maximum capability (e.g., original DeepSeek)
+- **Distilled models:** Optimized versions that maintain good performance while using fewer resources
+- When you see names like "Qwen" or "Llama", these refer to different model architectures and training approaches
+
+
+Example: A 7B model with Q4 quantization provides an excellent balance for most users.
+
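+The math behind that example is simple enough to check yourself. A rough sketch, assuming roughly 4.5 bits per weight for Q4 GGUF files and ~20% extra for the runtime and context cache (ballpark figures, not guarantees):
+
+```python
+def approx_memory_gb(params_billion: float, bits_per_weight: float,
+                     overhead: float = 1.2) -> float:
+    """Rough memory estimate: weight size plus ~20% runtime overhead."""
+    return params_billion * bits_per_weight / 8 * overhead
+
+print(f"7B  @ Q4: ~{approx_memory_gb(7, 4.5):.1f} GB")   # ~4.7 GB
+print(f"7B  @ Q8: ~{approx_memory_gb(7, 8.5):.1f} GB")   # ~8.9 GB
+print(f"70B @ Q4: ~{approx_memory_gb(70, 4.5):.1f} GB")  # ~47 GB
+```
+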
+## Hardware Requirements
+
+Before downloading an AI model, let's check if your computer can run it.
+
+
+The most important thing is VRAM:
+- VRAM is your graphics card's memory
+- More VRAM = ability to run bigger AI models
+- Most computers have between 4GB and 16GB of VRAM
+
+
+### How to check your VRAM:
+
+**On Windows:**
+1. Press Windows + R
+2. Type "dxdiag" and press Enter
+3. Click "Display" tab
+4. Look for "Display Memory"
+
+**On Mac:**
+1. Click Apple menu
+2. Select "About This Mac"
+3. Click "More Info"
+4. Look under "Graphics/Displays"
+
+**On Linux:**
+1. Open Terminal
+2. Run: `nvidia-smi` (for NVIDIA GPUs)
+3. Or: `lspci -v | grep -i vga` (for general GPU info)
+
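+If you have an NVIDIA card and don't mind a few lines of Python, the same check can be scripted (a small sketch; it just shells out to `nvidia-smi`, so that tool must be installed):
+
+```python
+import subprocess
+
+# Ask nvidia-smi for each GPU's name and total memory.
+out = subprocess.run(
+    ["nvidia-smi", "--query-gpu=name,memory.total", "--format=csv,noheader"],
+    capture_output=True, text=True, check=True,
+)
+print(out.stdout.strip())  # e.g. "NVIDIA GeForce RTX 3060, 12288 MiB"
+```
+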
+### Which models can you run?
+
+Here's a simple guide:
+
+| Your VRAM | What You Can Run | What It Can Do |
+|-----------|-----------------|----------------|
+| 4GB | Small models (1-3B) | Basic writing and questions |
+| 6GB | Medium models (7B) | Good for most tasks |
+| 8GB | Larger models (13B) | Better understanding |
+| 16GB | Largest models (32B) | Best performance |
+
+
+Start with smaller models:
+- Try 7B models first - they work well for most people
+- Test how they run on your computer
+- Try larger models only if you need better results
+
+
+## Setting Up Your Local AI
+
+### 1. Get Started
+Download Jan from [jan.ai](https://jan.ai) - it sets everything up for you.
+
+### 2. Get an AI Model
+
+You can get models two ways:
+
+### Option 1: Use Jan Hub (Recommended)
+ - Click "Download Model" in Jan
+ - Pick a recommended model
+ - Choose one that fits your computer
+
+![Downloading a model from Jan Hub](./_assets/jan-model-download.jpg)
+*Use Jan Hub to download AI models*
+
+### Option 2: Use Hugging Face
+
+
+Important: Only GGUF models will work with Jan. Make sure to use models that have "GGUF" in their name.
+
+
+#### Step 1: Get the model link
+Find and copy a GGUF model link from [Hugging Face](https://huggingface.co)
+
+![Finding a GGUF model on Hugging Face](./_assets/hugging-face-jan-model-download.jpg)
+*Look for models with "GGUF" in their name*
+
+#### Step 2: Open Jan
+Launch Jan and go to the Models tab
+
+![Opening Jan's model section](./_assets/jan-library-deepseek-r1.jpg)
+*Navigate to the Models section in Jan*
+
+#### Step 3: Add the model
+Paste your Hugging Face link into Jan
+
+![Adding a model from Hugging Face](./_assets/jan-hub-deepseek-r1.jpg)
+*Paste your GGUF model link here*
+
+#### Step 4: Download
+Select your quantization and start the download
+
+![Downloading the model](./_assets/jan-hf-model-download.jpg)
+*Choose your preferred model size and download*
+
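+Prefer to script the download? The `huggingface_hub` Python package can fetch a GGUF file directly, and you can then import it into Jan. A sketch using an example repo and filename - check the repo's "Files" tab for the exact GGUF name you want:
+
+```python
+from huggingface_hub import hf_hub_download  # pip install huggingface_hub
+
+path = hf_hub_download(
+    repo_id="bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF",  # example repo
+    filename="DeepSeek-R1-Distill-Qwen-7B-Q4_K_M.gguf",    # example file
+)
+print("saved to:", path)
+```
+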
+### Common Questions
+
+
+**"My computer doesn't have a graphics card - can I still use AI?"**
+Yes! It will run slower but still work. Start with 7B models.
+
+**"Which model should I start with?"**
+Try a 7B model first - it's the best balance of smart and fast.
+
+**"Will it slow down my computer?"**
+Only while you're using the AI. Close other big programs for better speed.
+
+
+## Need help?
+
+Having trouble? We're here to help! [Join our Discord community](https://discord.gg/Exe46xPMbK) for support.
+
\ No newline at end of file