forked from openai/openai-node
-
Notifications
You must be signed in to change notification settings - Fork 0
/
moderations.ts
369 lines (311 loc) · 9.25 KB
/
moderations.ts
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import { APIResource } from '../resource';
import * as Core from '../core';
export class Moderations extends APIResource {
  /**
   * Classifies whether the given text and/or image inputs are potentially
   * harmful. Learn more in the
   * [moderation guide](https://platform.openai.com/docs/guides/moderation).
   */
  create(
    body: ModerationCreateParams,
    options?: Core.RequestOptions,
  ): Core.APIPromise<ModerationCreateResponse> {
    // Issue the POST, forwarding any per-call request options alongside the body.
    const request = { body, ...options };
    return this._client.post('/moderations', request);
  }
}
/**
 * A single moderation result.
 */
export interface Moderation {
  /** Per-category flags indicating which categories were triggered. */
  categories: Moderation.Categories;

  /** For each category, the input type(s) that the score applies to. */
  category_applied_input_types: Moderation.CategoryAppliedInputTypes;

  /** Per-category scores as predicted by the model. */
  category_scores: Moderation.CategoryScores;

  /** True when at least one of the categories is flagged. */
  flagged: boolean;
}

export namespace Moderation {
  /**
   * Per-category flags indicating which categories were triggered.
   */
  export interface Categories {
    /** Content that expresses, incites, or promotes harassing language towards any target. */
    harassment: boolean;

    /** Harassment content that also includes violence or serious harm towards any target. */
    'harassment/threatening': boolean;

    /**
     * Content that expresses, incites, or promotes hate based on race, gender,
     * ethnicity, religion, nationality, sexual orientation, disability status, or
     * caste. Hateful content aimed at non-protected groups (e.g., chess players)
     * is harassment.
     */
    hate: boolean;

    /**
     * Hateful content that also includes violence or serious harm towards the
     * targeted group based on race, gender, ethnicity, religion, nationality,
     * sexual orientation, disability status, or caste.
     */
    'hate/threatening': boolean;

    /**
     * Content that includes instructions or advice that facilitate the planning or
     * execution of wrongdoing, or that gives advice or instruction on how to commit
     * illicit acts. For example, "how to shoplift" would fit this category.
     */
    illicit: boolean;

    /**
     * Content that includes instructions or advice that facilitate the planning or
     * execution of wrongdoing that also includes violence, or that gives advice or
     * instruction on the procurement of any weapon.
     */
    'illicit/violent': boolean;

    /**
     * Content that promotes, encourages, or depicts acts of self-harm, such as
     * suicide, cutting, and eating disorders.
     */
    'self-harm': boolean;

    /**
     * Content that encourages performing acts of self-harm, such as suicide,
     * cutting, and eating disorders, or that gives instructions or advice on how
     * to commit such acts.
     */
    'self-harm/instructions': boolean;

    /**
     * Content where the speaker expresses that they are engaging or intend to
     * engage in acts of self-harm, such as suicide, cutting, and eating disorders.
     */
    'self-harm/intent': boolean;

    /**
     * Content meant to arouse sexual excitement, such as the description of sexual
     * activity, or that promotes sexual services (excluding sex education and
     * wellness).
     */
    sexual: boolean;

    /** Sexual content that includes an individual who is under 18 years old. */
    'sexual/minors': boolean;

    /** Content that depicts death, violence, or physical injury. */
    violence: boolean;

    /** Content that depicts death, violence, or physical injury in graphic detail. */
    'violence/graphic': boolean;
  }

  /**
   * For each category, the input type(s) that the score applies to.
   */
  export interface CategoryAppliedInputTypes {
    /** The applied input type(s) for the category 'harassment'. */
    harassment: 'text'[];

    /** The applied input type(s) for the category 'harassment/threatening'. */
    'harassment/threatening': 'text'[];

    /** The applied input type(s) for the category 'hate'. */
    hate: 'text'[];

    /** The applied input type(s) for the category 'hate/threatening'. */
    'hate/threatening': 'text'[];

    /** The applied input type(s) for the category 'illicit'. */
    illicit: 'text'[];

    /** The applied input type(s) for the category 'illicit/violent'. */
    'illicit/violent': 'text'[];

    /** The applied input type(s) for the category 'self-harm'. */
    'self-harm': ('text' | 'image')[];

    /** The applied input type(s) for the category 'self-harm/instructions'. */
    'self-harm/instructions': ('text' | 'image')[];

    /** The applied input type(s) for the category 'self-harm/intent'. */
    'self-harm/intent': ('text' | 'image')[];

    /** The applied input type(s) for the category 'sexual'. */
    sexual: ('text' | 'image')[];

    /** The applied input type(s) for the category 'sexual/minors'. */
    'sexual/minors': 'text'[];

    /** The applied input type(s) for the category 'violence'. */
    violence: ('text' | 'image')[];

    /** The applied input type(s) for the category 'violence/graphic'. */
    'violence/graphic': ('text' | 'image')[];
  }

  /**
   * Per-category scores as predicted by the model.
   */
  export interface CategoryScores {
    /** The score for the category 'harassment'. */
    harassment: number;

    /** The score for the category 'harassment/threatening'. */
    'harassment/threatening': number;

    /** The score for the category 'hate'. */
    hate: number;

    /** The score for the category 'hate/threatening'. */
    'hate/threatening': number;

    /** The score for the category 'illicit'. */
    illicit: number;

    /** The score for the category 'illicit/violent'. */
    'illicit/violent': number;

    /** The score for the category 'self-harm'. */
    'self-harm': number;

    /** The score for the category 'self-harm/instructions'. */
    'self-harm/instructions': number;

    /** The score for the category 'self-harm/intent'. */
    'self-harm/intent': number;

    /** The score for the category 'sexual'. */
    sexual: number;

    /** The score for the category 'sexual/minors'. */
    'sexual/minors': number;

    /** The score for the category 'violence'. */
    violence: number;

    /** The score for the category 'violence/graphic'. */
    'violence/graphic': number;
  }
}
/**
 * An object describing an image to classify.
 */
export interface ModerationImageURLInput {
  /** Contains either an image URL or a data URL for a base64 encoded image. */
  image_url: ModerationImageURLInput.ImageURL;

  /** Discriminator; always the literal `image_url`. */
  type: 'image_url';
}

export namespace ModerationImageURLInput {
  /**
   * Contains either an image URL or a data URL for a base64 encoded image.
   */
  export interface ImageURL {
    /** Either a URL of the image or the base64 encoded image data. */
    url: string;
  }
}
/**
 * Known moderation model identifiers. Note that `ModerationCreateParams.model`
 * also accepts arbitrary strings (`string & {}`), so newly released models can
 * be used without a type update.
 */
export type ModerationModel =
  | 'omni-moderation-latest'
  | 'omni-moderation-2024-09-26'
  | 'text-moderation-latest'
  | 'text-moderation-stable';
/**
 * An object describing either text or an image to classify: the union of
 * `ModerationImageURLInput` and `ModerationTextInput`.
 */
export type ModerationMultiModalInput = ModerationImageURLInput | ModerationTextInput;
/**
 * An object describing text to classify.
 */
export interface ModerationTextInput {
  /** The string of text to classify. */
  text: string;

  /** Discriminator; always the literal `text`. */
  type: 'text';
}
/**
 * The response to a moderation request, indicating whether the submitted
 * input(s) are potentially harmful.
 */
export interface ModerationCreateResponse {
  /** The unique identifier for the moderation request. */
  id: string;

  /** The model used to generate the moderation results. */
  model: string;

  /** A list of moderation objects. */
  results: Moderation[];
}
/**
 * Parameters for creating a moderation.
 */
export interface ModerationCreateParams {
  /**
   * Input (or inputs) to classify: a single string, an array of strings, or an
   * array of multi-modal input objects similar to other models.
   */
  input: string | string[] | ModerationMultiModalInput[];

  /**
   * The content moderation model you would like to use. Learn more in
   * [the moderation guide](https://platform.openai.com/docs/guides/moderation), and
   * learn about available models
   * [here](https://platform.openai.com/docs/models#moderation).
   */
  // `string & {}` preserves autocomplete for the ModerationModel literals while
  // still accepting any other model name.
  model?: (string & {}) | ModerationModel;
}
/**
 * Re-exports the moderation types under the `Moderations` resource class so
 * consumers can reference them as e.g. `Moderations.Moderation` or
 * `Moderations.ModerationCreateParams`. Type-only: erased at compile time.
 */
export declare namespace Moderations {
  export {
    type Moderation as Moderation,
    type ModerationImageURLInput as ModerationImageURLInput,
    type ModerationModel as ModerationModel,
    type ModerationMultiModalInput as ModerationMultiModalInput,
    type ModerationTextInput as ModerationTextInput,
    type ModerationCreateResponse as ModerationCreateResponse,
    type ModerationCreateParams as ModerationCreateParams,
  };
}