// types.ts
  1. export namespace OpenAI {
  2. export type Role = "system" | "user" | "assistant" | "function";
  3. export type FinishReason = "stop" | "length" | "function_call";
  4. export interface Message {
  5. role: Role;
  6. content?: string;
  7. function_call?: {
  8. name: string;
  9. arguments: string;
  10. };
  11. }
  12. export interface Function {
  13. name: string;
  14. description?: string;
  15. parameters: object;
  16. }
  17. export interface ListModelResponse {
  18. object: string;
  19. data: Array<{
  20. id: string;
  21. object: string;
  22. root: string;
  23. }>;
  24. }
  25. export interface ChatCompletionChoice {
  26. index: number;
  27. message: Message;
  28. finish_reason: FinishReason;
  29. }
  30. export interface ChatCompletionUsage {
  31. prompt_tokens: number;
  32. completion_tokens: number;
  33. total_tokens: number;
  34. }
  35. export interface ChatCompletionResponse {
  36. id: string;
  37. object: string;
  38. created: number;
  39. model: string;
  40. choices: ChatCompletionChoice[];
  41. usage: ChatCompletionUsage;
  42. }
  43. export interface ChatCompletionChunkChoice {
  44. index: number;
  45. delta: Message;
  46. finish_reason?: FinishReason;
  47. }
  48. export interface ChatCompletionStreamResponse {
  49. object: string;
  50. created: number;
  51. model: string;
  52. choices: ChatCompletionChunkChoice[];
  53. }
  54. export interface ChatCompletionRequest {
  55. model: string;
  56. messages: Message[];
  57. functions?: Function[];
  58. function_call?: "none" | "auto";
  59. temperature?: number;
  60. top_p?: number;
  61. n?: number;
  62. stream?: boolean;
  63. stop?: string | string[];
  64. max_tokens?: number;
  65. presence_penalty?: number;
  66. frequency_penalty?: number;
  67. }
  68. }