// types.ts — Anthropic legacy text-completions API type declarations.
  1. export namespace Anthropic {
  2. export interface ChatRequest {
  3. model: string; // The model that will complete your prompt.
  4. prompt: string; // The prompt that you want Claude to complete.
  5. max_tokens_to_sample: number; // The maximum number of tokens to generate before stopping.
  6. stop_sequences?: string[]; // Sequences that will cause the model to stop generating completion text.
  7. temperature?: number; // Amount of randomness injected into the response.
  8. top_p?: number; // Use nucleus sampling.
  9. top_k?: number; // Only sample from the top K options for each subsequent token.
  10. metadata?: object; // An object describing metadata about the request.
  11. stream?: boolean; // Whether to incrementally stream the response using server-sent events.
  12. }
  13. export interface ChatResponse {
  14. completion: string;
  15. stop_reason: "stop_sequence" | "max_tokens";
  16. model: string;
  17. }
  18. export type ChatStreamResponse = ChatResponse & {
  19. stop?: string;
  20. log_id: string;
  21. };
  22. }