package.json
{
  "name": "review-gpt",
  "displayName": "review-gpt",
  "description": "An automated code review tool powered by GPT-3",
  "license": "GPL-3.0-only",
  "publisher": "reviewgpt",
  "icon": "assets/logo/shrinked_logo.png",
  "repository": {
    "type": "git",
    "url": "https://github.com/vibovenkat123/rgpt-vscode"
  },
  "version": "1.2.8",
  "engines": {
    "vscode": "^1.75.0"
  },
  "categories": [
    "Other"
  ],
  "activationEvents": [
    "onCommand:review-gpt.review"
  ],
  "main": "./out/extension.js",
  "keywords": [
    "reviewgpt",
    "gpt",
    "code review",
    "review",
    "vibovenkat",
    "vibovenkat123"
  ],
  "contributes": {
    "configuration": {
      "title": "rgpt",
      "properties": {
        "rgpt.model": {
          "type": "string",
          "default": "turbo",
          "description": "The GPT model to use"
        },
        "rgpt.max_tokens": {
          "type": "integer",
          "default": 500,
          "description": "The maximum number of tokens to use (use OpenAI's tokenizer to convert your text to tokens). More tokens cost more."
        },
        "rgpt.temperature": {
          "type": "number",
          "default": 0.2,
          "description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
        },
        "rgpt.top_p": {
          "type": "number",
          "default": 1,
          "description": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered."
        },
"rgpt.frequence_penalty": {
"type": "number",
"default": 1.2,
"description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."
},
"rgpt.presence_penalty": {
"type": "number",
"default": 0.3,
"description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."
},
"rgpt.best_of": {
"type": "integer",
"default": 1,
"description": "Generates best_of completions server-side and returns the 'best' (the one with the highest log probability per token). Results cannot be streamed."
}
}
},
"commands": [
{
"command": "review-gpt.review",
"title": "RGPT: Review Code"
}
]
},
"scripts": {
"vscode:prepublish": "npm run compile",
"compile": "tsc -p ./",
"watch": "tsc -watch -p ./",
"pretest": "npm run compile && npm run lint",
"lint": "eslint src --ext ts",
"test": "node ./out/test/runTest.js"
},
"devDependencies": {
"@types/vscode": "^1.75.0",
"@types/glob": "^8.0.1",
"@types/mocha": "^10.0.1",
"@types/node": "16.x",
"@typescript-eslint/eslint-plugin": "^5.49.0",
"@typescript-eslint/parser": "^5.49.0",
"eslint": "^8.33.0",
"glob": "^8.1.0",
"mocha": "^10.1.0",
"typescript": "^4.9.4",
"@vscode/test-electron": "^2.2.2"
}
}
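
For context on how the manifest's pieces fit together, below is a minimal TypeScript sketch of an extension entry point that would compile to the `./out/extension.js` declared in `main`. It registers the `review-gpt.review` command from `contributes.commands` and reads the `rgpt.*` settings declared under `contributes.configuration`. This is not the extension's actual source: the `ReviewOptions` shape and the `reviewText` helper are hypothetical stand-ins for whatever the real code does with the GPT backend.

```ts
import * as vscode from "vscode";

// Hypothetical bundle of the rgpt.* settings from the manifest above.
interface ReviewOptions {
  model: string;
  maxTokens: number;
  temperature: number;
  topP: number;
  frequencyPenalty: number;
  presencePenalty: number;
  bestOf: number;
}

export function activate(context: vscode.ExtensionContext) {
  // Register the command declared under "contributes.commands".
  const disposable = vscode.commands.registerCommand("review-gpt.review", async () => {
    const editor = vscode.window.activeTextEditor;
    if (!editor) {
      vscode.window.showWarningMessage("Open a file to review.");
      return;
    }

    // Read the user's settings; the fallbacks mirror the manifest defaults.
    const cfg = vscode.workspace.getConfiguration("rgpt");
    const opts: ReviewOptions = {
      model: cfg.get("model", "turbo"),
      maxTokens: cfg.get("max_tokens", 500),
      temperature: cfg.get("temperature", 0.2),
      topP: cfg.get("top_p", 1),
      frequencyPenalty: cfg.get("frequency_penalty", 1.2),
      presencePenalty: cfg.get("presence_penalty", 0.3),
      bestOf: cfg.get("best_of", 1),
    };

    // Review the selection if there is one, otherwise the whole document.
    const text = editor.selection.isEmpty
      ? editor.document.getText()
      : editor.document.getText(editor.selection);

    const review = await reviewText(text, opts);
    vscode.window.showInformationMessage(review);
  });

  context.subscriptions.push(disposable);
}

// Placeholder: the real extension would send `text` and `opts` to the
// OpenAI completions endpoint and return the model's review.
async function reviewText(text: string, opts: ReviewOptions): Promise<string> {
  return `Reviewed ${text.length} characters with ${opts.model}.`;
}

export function deactivate() {}
```

With this wiring, the command surfaces in the Command Palette under the manifest's title "RGPT: Review Code", and each `cfg.get` fallback matches the corresponding `default` in the manifest, so the sketch behaves the same whether or not the user has customized the settings.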