diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 00000000..f1d78be3
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,17 @@
+# A tool used by developers to identify spelling errors in text.
+# Readers may ignore this file.
+
+default_stages: [commit]
+
+repos:
+  - repo: https://github.com/codespell-project/codespell
+    rev: v2.3.0
+    hooks:
+      - id: codespell
+        name: codespell
+        description: Check for spelling errors in text.
+        entry: codespell
+        language: python
+        args:
+          - "-L ocassion,occassion,ot,te,tje"
+        files: \.txt$|\.md$|\.py|\.ipynb$
diff --git a/ch03/02_bonus_efficient-multihead-attention/mha-implementations.ipynb b/ch03/02_bonus_efficient-multihead-attention/mha-implementations.ipynb
index 918ce00c..be65120f 100644
--- a/ch03/02_bonus_efficient-multihead-attention/mha-implementations.ipynb
+++ b/ch03/02_bonus_efficient-multihead-attention/mha-implementations.ipynb
@@ -317,7 +317,7 @@
     "id": "f78e346f-3b85-44e6-9feb-f01131381148"
    },
    "source": [
-    "- The implementation below uses PyTorch's [`scaled_dot_product_attention`](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html) function, which implements a memory-optimized version of self-attention calld [flash attention](https://arxiv.org/abs/2205.14135)"
+    "- The implementation below uses PyTorch's [`scaled_dot_product_attention`](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html) function, which implements a memory-optimized version of self-attention called [flash attention](https://arxiv.org/abs/2205.14135)"
    ]
   },
   {
diff --git a/ch04/01_main-chapter-code/ch04.ipynb b/ch04/01_main-chapter-code/ch04.ipynb
index 5d1732a2..6d4a0c30 100644
--- a/ch04/01_main-chapter-code/ch04.ipynb
+++ b/ch04/01_main-chapter-code/ch04.ipynb
@@ -1043,7 +1043,7 @@
    "id": "dec7d03d-9ff3-4ca3-ad67-01b67c2f5457",
    "metadata": {},
    "source": [
-    "- We are almost there: now let's plug in the transformer block into the architecture we coded at the very beginning of this chapter so that we obtain a useable GPT architecture\n",
+    "- We are almost there: now let's plug in the transformer block into the architecture we coded at the very beginning of this chapter so that we obtain a usable GPT architecture\n",
     "- Note that the transformer block is repeated multiple times; in the case of the smallest 124M GPT-2 model, we repeat it 12 times:"
    ]
   },
diff --git a/ch06/02_bonus_additional-experiments/additional-experiments.py b/ch06/02_bonus_additional-experiments/additional-experiments.py
index 757bcb2c..6246c61b 100644
--- a/ch06/02_bonus_additional-experiments/additional-experiments.py
+++ b/ch06/02_bonus_additional-experiments/additional-experiments.py
@@ -370,7 +370,7 @@ def replace_linear_with_lora(model, rank, alpha):
         action='store_true',
         default=False,
         help=(
-            "Disable padding, which means each example may have a different lenght."
+            "Disable padding, which means each example may have a different length."
             " This requires setting `--batch_size 1`."
         )
     )
diff --git a/ch07/02_dataset-utilities/create-passive-voice-entries.ipynb b/ch07/02_dataset-utilities/create-passive-voice-entries.ipynb
index 04d5b4cc..c5029e6c 100644
--- a/ch07/02_dataset-utilities/create-passive-voice-entries.ipynb
+++ b/ch07/02_dataset-utilities/create-passive-voice-entries.ipynb
@@ -166,7 +166,7 @@
     "    return response.choices[0].message.content\n",
     "\n",
     "\n",
-    "# Prepare intput\n",
+    "# Prepare input\n",
     "sentence = \"I ate breakfast\"\n",
     "prompt = f\"Convert the following sentence to passive voice: '{sentence}'\"\n",
     "run_chatgpt(prompt, client)"
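
For reference, the codespell hook added in .pre-commit-config.yaml above is driven by the pre-commit framework; a minimal local workflow (a sketch, assuming pre-commit is installed from PyPI) would be:

    pip install pre-commit        # install the hook runner itself; codespell is fetched from the repo URL in the config
    pre-commit install            # register the git hook defined in .pre-commit-config.yaml
    pre-commit run --all-files    # optionally run codespell once across the whole repository

With default_stages set to [commit], the spell check then runs automatically on every git commit.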