@@ -30,6 +30,19 @@ object Models {
    // GPT 4.0 //
    // ///////////////////////////////////////////////////

+    /**
+     * `gpt-4` Turbo. Has a context window of 128,000 tokens with training
+     * data up to April 2023. This model is intended to reduce "laziness"
+     * where the model doesn't complete the task. Returns a maximum of 4,096
+     * output tokens.
+     */
+    const val GPT_4_0125_PREVIEW = "gpt-4-0125-preview"
+
+    /**
+     * Points to the currently supported version of `gpt-4` turbo.
+     */
+    const val GPT_4_TURBO_PREVIEW = "gpt-4-turbo-preview"
+
    /**
     * `gpt-4` Turbo. Has a context window of 128,000 tokens with training
     * data up to April 2023. This model has improved instruction following,
@@ -38,12 +51,18 @@ object Models {
     */
    const val GPT_4_1106_PREVIEW = "gpt-4-1106-preview"

+    /**
+     * Points to the currently supported version of `gpt-4` turbo with
+     * vision.
+     */
+    const val GPT_4_VISION_PREVIEW = "gpt-4-vision-preview"
+
    /**
     * `gpt-4` Turbo with vision. Has a context window of 128,000 tokens with
     * training data up to April 2023. Has the same capabilities as
     * [GPT_4_1106_PREVIEW], but can also understand images.
     */
-    const val GPT_4_VISION_PREVIEW = "gpt-4-vision-preview"
+    const val GPT_4_1106_VISION_PREVIEW = "gpt-4-1106-vision-preview"

    /**
     * Points to the currently supported version of `gpt-4`.
@@ -103,6 +122,12 @@ object Models {
    // GPT 3.5 //
    // ///////////////////////////////////////////////////

+    /**
+     * Snapshot of `gpt-3.5-turbo` with higher accuracy in responding in
+     * requested formats. Returns a maximum of 4,096 output tokens.
+     */
+    const val GPT_3_5_TURBO_0125 = "gpt-3.5-turbo-0125"
+
    /**
     * Has a context window of 16,385 tokens with training data up to
     * September 2021. This model has improved instruction following, JSON
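For orientation, a minimal sketch of how the constants added in this diff might be consumed by a caller; the ModelsSketch object and main function below are illustrative assumptions, not part of the change, and simply contrast a pinned dated snapshot with the rolling "preview" alias.

// Sketch only: a trimmed copy of the constants introduced above, plus a
// hypothetical caller showing the difference between pinning a dated
// snapshot and following the rolling preview alias.
object ModelsSketch {
    const val GPT_4_0125_PREVIEW = "gpt-4-0125-preview"   // dated snapshot
    const val GPT_4_TURBO_PREVIEW = "gpt-4-turbo-preview" // rolling alias
    const val GPT_3_5_TURBO_0125 = "gpt-3.5-turbo-0125"   // dated snapshot
}

fun main() {
    // Pin the snapshot for reproducible behavior, or use the alias to pick
    // up whichever gpt-4 turbo preview snapshot is currently supported.
    val pinned = ModelsSketch.GPT_4_0125_PREVIEW
    val rolling = ModelsSketch.GPT_4_TURBO_PREVIEW
    println("pinned=$pinned rolling=$rolling")
}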