#!/usr/bin/env bash
# bash (not plain sh) is required: the script uses local, [[ ]], &>, and
# ${var:offset:length} substring expansion.
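#
# gpt - ask the OpenAI chat completions API a question from the command line.
# Example usage (assuming this file is executable, available on PATH as "gpt",
# and OPENAI_API_KEY is exported in the environment):
#   gpt "how do I find the largest files in a directory?"
#   gpt -m gpt-4 "explain bash parameter expansion"
#   echo "what does chmod 755 do?" | gpt
#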
# coloring
NO_FORMAT="\033[0m"
C_RED="\033[38;5;9m"
C_PURPLE="\033[38;5;5m"
C_CYAN="\033[38;5;14m"
# check the dependencies
dependencies=" jq curl "
for dependency in $dependencies; do
    if ! command -v "$dependency" &> /dev/null; then
        echo -e "${C_RED}Error: unsatisfied dependency $dependency${NO_FORMAT}"
        exit 1
    fi
done
# spinner animation function, meant to run as a background job
spin() {
    local spin_chars="⡿⢿⣻⣽⣾⣷⣯⣟"
    local i=0
    while true; do
        # print the next frame over the current line every 0.1 s
        printf "\r${C_CYAN}${spin_chars:i++%8:1}${NO_FORMAT} "
        sleep 0.1
    done
}
# request MODEL CONTENT: send a chat completion request and print the answer
request() {
    local model=$1
    local content=$2
    # start spinner animation in the background
    spin &
    spinner_pid=$!
    # build the JSON payload with jq so that quotes and newlines in the user
    # content are escaped properly instead of breaking the request body
    payload=$(jq -n --arg model "$model" --arg content "$content" '{
        model: $model,
        messages: [
            {role: "system", content: "you are a cli assistant, respond with informative but rapid answers, consider that your answer will be directly printed to terminal"},
            {role: "user", content: $content}
        ]
    }')
    response=$(curl -s https://api.openai.com/v1/chat/completions \
        -H "Content-Type: application/json" \
        -H "Authorization: Bearer $OPENAI_API_KEY" \
        -d "$payload")
    # stop spinner animation and clear its output
    kill "$spinner_pid" &> /dev/null
    wait "$spinner_pid" 2> /dev/null
    printf "\r"
    text=$(echo "$response" | jq -r '.choices[0].message.content')
    if [ -z "$text" ] || [ "$text" == "null" ]; then
        error=$(echo "$response" | jq -r '.error.message')
        if [ -z "$error" ] || [ "$error" == "null" ]; then
            echo -e "${C_RED}No response${NO_FORMAT}"
        else
            echo -e "${C_RED}$error${NO_FORMAT}"
        fi
        exit 1
    fi
    echo -e "${C_PURPLE}>${NO_FORMAT} $text"
}
# list of supported models (padded with spaces so membership can be checked
# with a simple pattern match below)
models=" gpt-4 gpt-3.5-turbo gpt-3.5 "
# print help message
print_help() {
    echo "This script sends a request to the OpenAI API to generate chat completions"
    echo "Usage: ${0##*/} [OPTIONS] QUERY"
    echo "       command | ${0##*/} [OPTIONS]    (read the query from stdin)"
    echo "Options:"
    echo "  -m, --model MODEL    Specify the model to use (default: gpt-3.5-turbo)"
    echo "                       Supported models:$models"
    echo
    echo "  -h, --help           Print this help message"
}
# Arguments and query parsing
model="gpt-3.5-turbo"
while [[ $# -gt 0 ]]; do
    case $1 in
        -m|--model)
            if [ $# -lt 2 ]; then
                echo -e "${C_RED}Error: option $1 requires an argument${NO_FORMAT}"
                exit 1
            fi
            model=$2
            shift 2
            ;;
        -h|--help)
            print_help
            exit 0
            ;;
        *)
            break
            ;;
    esac
done
# if stdin is a terminal (nothing is being piped in), require a query argument
if [ -t 0 ] && [ $# -eq 0 ]; then
    echo -e "${C_RED}Error: No input provided${NO_FORMAT}"
    print_help
    exit 1
fi
# read the query from stdin when piped, otherwise use the remaining arguments
if [ -t 0 ]; then
    query="$*"
else
    query=$(cat)
fi
# check that the model is in the list of supported models
if ! [[ "$models" == *" $model "* ]]; then
    echo -e "${C_RED}Error: Model '$model' is not supported${NO_FORMAT}"
    exit 1
fi
# check that the OPENAI_API_KEY environment variable is set
if [ -z "$OPENAI_API_KEY" ]; then
    echo -e "${C_RED}Error: OPENAI_API_KEY is not specified${NO_FORMAT}"
    exit 1
fi
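# The key only needs to be exported once per shell session, e.g.
# (the value below is a placeholder, not a real key):
#   export OPENAI_API_KEY="sk-your-key-here"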
# map shorthand model names to the identifiers expected by the API
if [ "$model" == "gpt-3.5" ]; then
    model="gpt-3.5-turbo"
elif [ "$model" == "gpt-4" ]; then
    model="gpt-4-1106-preview"
fi
# call the request function with the chosen model and query
request "$model" "$query"