diff --git a/.ocamlformat b/.ocamlformat
new file mode 100644
index 0000000..a499fbc
--- /dev/null
+++ b/.ocamlformat
@@ -0,0 +1 @@
+profile = default
\ No newline at end of file
diff --git a/examples/ai_chat.ml b/examples/ai_chat.ml
index ff792d9..936490c 100644
--- a/examples/ai_chat.ml
+++ b/examples/ai_chat.ml
@@ -1,9 +1,7 @@
 class read_line ~term ~history ~state =
   object (self)
     inherit LTerm_read_line.read_line ~history ()
-
     inherit [Zed_string.t] LTerm_read_line.term term
-
     method! show_box = false
 
     initializer
@@ -19,32 +17,29 @@ let rec loop term history state =
         new read_line ~term ~history:(LTerm_history.contents history) ~state
       in
       let%lwt command = rl#run in
-      Lwt.return_some command )
+      Lwt.return_some command)
     (function Sys.Break -> Lwt.return None | exn -> Lwt.fail exn)
   in
   match input with
   | Some input ->
       let message =
-        Openai.Chat_completion_request_message.make
-          ~role:`User
-          ~content:(Zed_string.to_utf8 input)
-          ()
+        Openai.Chat_completion_request_message.make ~role:`User
+          ~content:(Zed_string.to_utf8 input) ()
      in
-      let state = state @ [message] in
+      let state = state @ [ message ] in
      let req =
-        Openai.Create_chat_completion_request.make
-          ~model:"gpt-3.5-turbo"
-          ~messages:state
-          ()
+        Openai.Create_chat_completion_request.make ~model:"gpt-3.5-turbo"
+          ~messages:state ()
      in
      (* n = 1 so we can assume the list is always shaped like this *)
-      let%lwt[@warning "-8"] {choices= [{message= message'; _}]; _} =
+      let%lwt[@warning "-8"] { choices = [ { message = message'; _ } ]; _ } =
        Openai.Open_ai_api.create_chat_completion
          ~create_chat_completion_request_t:
-            {req with messages= state; n= Some 1l}
+            { req with messages = state; n = Some 1l }
      in
      let state =
-        state @ [{role= message'.role; content= message'.content; name= None}]
+        state
+        @ [ { role = message'.role; content = message'.content; name = None } ]
      in
      let label = new LTerm_widget.label message'.content in
      let () = label#set_alignment LTerm_geom.H_align_right in
@@ -58,28 +53,30 @@ let rec loop term history state =
        | line :: lines ->
            if String.length line > text_width then
              let line' = String.sub line 0 text_width in
-              let line'' = String.sub line text_width (String.length line - text_width) in
+              let line'' =
+                String.sub line text_width (String.length line - text_width)
+              in
              line' :: aux (line'' :: lines)
            else line :: aux lines
      in
      aux lines |> String.concat "\n"
    in
    LTerm_text.(
-      eval [
-        B_bold true;
-        B_fg LTerm_style.black;
-        B_bg (LTerm_style.rgb 128 255 128);
-        S message'';
-        E_bg;
-        E_fg;
-        E_bold;
-      ])
+      eval
+        [
+          B_bold true;
+          B_fg LTerm_style.black;
+          B_bg (LTerm_style.rgb 128 255 128);
+          S message'';
+          E_bg;
+          E_fg;
+          E_bold;
+        ])
    |> LTerm.fprintls term
  in
-  LTerm_history.add history input ;
-  loop term history state
-  | None ->
+  LTerm_history.add history input;
+  loop term history state
+  | None -> loop term history state
 
 let chatgpt initial_state =
   let%lwt () = LTerm_inputrc.load () in
@@ -87,20 +84,20 @@ let chatgpt initial_state =
     (fun () ->
      let state = initial_state in
      let%lwt term = Lazy.force LTerm.stdout in
-      loop term (LTerm_history.create []) state )
-    (function LTerm_read_line.Interrupt -> Lwt.return () | exn -> Lwt.fail exn)
+      loop term (LTerm_history.create []) state)
+    (function
+      | LTerm_read_line.Interrupt -> Lwt.return () | exn -> Lwt.fail exn)
 
 let _ =
   let system_prompt =
-    Openai.Chat_completion_request_message.make
-      ~role:`System
-      ~content:"You are a general-purpose assistant who provides detailed, helpful responses."
+    Openai.Chat_completion_request_message.make ~role:`System
+      ~content:
+        "You are a general-purpose assistant who provides detailed, helpful \
+         responses." ()
   in
   let init_message =
-    Openai.Chat_completion_request_message.make
-      ~role:`User
-      ~content:"Hello! What are you?"
-      ()
+    Openai.Chat_completion_request_message.make ~role:`User
+      ~content:"Hello! What are you?" ()
   in
-  Lwt_main.run (chatgpt [system_prompt; init_message])
+  Lwt_main.run (chatgpt [ system_prompt; init_message ])
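(Aside, not part of the patch.) The example above is also the clearest picture of how this client is meant to be driven. Below is a minimal, UI-free sketch of the same round trip, assuming the library is linked exactly as in examples/ai_chat.ml; how Request.default_headers obtains the API key is not shown anywhere in this diff.

(* A stripped-down variant of what examples/ai_chat.ml does above:
   one user message in, one assistant reply out. *)
let () =
  let message =
    Openai.Chat_completion_request_message.make ~role:`User
      ~content:"Hello! What are you?" ()
  in
  let req =
    Openai.Create_chat_completion_request.make ~model:"gpt-3.5-turbo"
      ~messages:[ message ] ()
  in
  let response =
    Lwt_main.run
      (Openai.Open_ai_api.create_chat_completion
         (* the same n = Some 1l record update the example uses *)
         ~create_chat_completion_request_t:{ req with n = Some 1l })
  in
  (* One choice was requested, so the head of the list is the reply. *)
  match response.choices with
  | { message = reply; _ } :: _ -> print_endline reply.content
  | [] -> prerr_endline "no completion returned"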
diff --git a/src/apis/open_ai_api.ml b/src/apis/open_ai_api.ml
index 7294b40..c9cced9 100644
--- a/src/apis/open_ai_api.ml
+++ b/src/apis/open_ai_api.ml
@@ -6,246 +6,397 @@
  *)
 
 let cancel_fine_tune ~fine_tune_id =
-    let open Lwt in
-    let uri = Request.build_uri "/fine-tunes/{fine_tune_id}/cancel" in
-    let headers = Request.default_headers in
-    let uri = Request.replace_path_param uri "fine_tune_id" (fun x -> x) fine_tune_id in
-    Cohttp_lwt_unix.Client.call `POST uri ~headers >>= fun (resp, body) ->
-    Request.read_json_body_as (JsonSupport.unwrap Fine_tune.of_yojson) resp body
+  let open Lwt in
+  let uri = Request.build_uri "/fine-tunes/{fine_tune_id}/cancel" in
+  let headers = Request.default_headers in
+  let uri =
+    Request.replace_path_param uri "fine_tune_id" (fun x -> x) fine_tune_id
+  in
+  Cohttp_lwt_unix.Client.call `POST uri ~headers
+  >>= fun (resp, body) ->
+  Request.read_json_body_as (JsonSupport.unwrap Fine_tune.of_yojson) resp body
 
 let create_answer ~create_answer_request_t =
-    let open Lwt in
-    let uri = Request.build_uri "/answers" in
-    let headers = Request.default_headers in
-    let body = Request.write_as_json_body Create_answer_request.to_yojson create_answer_request_t in
-    Cohttp_lwt_unix.Client.call `POST uri ~headers ~body >>= fun (resp, body) ->
-    Request.read_json_body_as (JsonSupport.unwrap Create_answer_response.of_yojson) resp body
+  let open Lwt in
+  let uri = Request.build_uri "/answers" in
+  let headers = Request.default_headers in
+  let body =
+    Request.write_as_json_body Create_answer_request.to_yojson
+      create_answer_request_t
+  in
+  Cohttp_lwt_unix.Client.call `POST uri ~headers ~body
+  >>= fun (resp, body) ->
+  Request.read_json_body_as
+    (JsonSupport.unwrap Create_answer_response.of_yojson)
+    resp body
 
 let create_chat_completion ~create_chat_completion_request_t =
-    let open Lwt in
-    let uri = Request.build_uri "/chat/completions" in
-    let headers = Request.default_headers in
-    let body = Request.write_as_json_body Create_chat_completion_request.to_yojson create_chat_completion_request_t in
-    Cohttp_lwt_unix.Client.call `POST uri ~headers ~body >>= fun (resp, body) ->
-    Request.read_json_body_as (JsonSupport.unwrap Create_chat_completion_response.of_yojson) resp body
+  let open Lwt in
+  let uri = Request.build_uri "/chat/completions" in
+  let headers = Request.default_headers in
+  let body =
+    Request.write_as_json_body Create_chat_completion_request.to_yojson
+      create_chat_completion_request_t
+  in
+  Cohttp_lwt_unix.Client.call `POST uri ~headers ~body
+  >>= fun (resp, body) ->
+  Request.read_json_body_as
+    (JsonSupport.unwrap Create_chat_completion_response.of_yojson)
+    resp body
 
 let create_classification ~create_classification_request_t =
-    let open Lwt in
-    let uri = Request.build_uri "/classifications" in
-    let headers = Request.default_headers in
-    let body = Request.write_as_json_body Create_classification_request.to_yojson create_classification_request_t in
-    Cohttp_lwt_unix.Client.call `POST uri ~headers ~body >>= fun (resp, body) ->
-    Request.read_json_body_as (JsonSupport.unwrap Create_classification_response.of_yojson) resp body
+  let open Lwt in
+  let uri = Request.build_uri "/classifications" in
+  let headers = Request.default_headers in
+  let body =
+    Request.write_as_json_body Create_classification_request.to_yojson
+      create_classification_request_t
+  in
+  Cohttp_lwt_unix.Client.call `POST uri ~headers ~body
+  >>= fun (resp, body) ->
+  Request.read_json_body_as
+    (JsonSupport.unwrap Create_classification_response.of_yojson)
+    resp body
 
 let create_completion ~create_completion_request_t =
-    let open Lwt in
-    let uri = Request.build_uri "/completions" in
-    let headers = Request.default_headers in
-    let body = Request.write_as_json_body Create_completion_request.to_yojson create_completion_request_t in
-    Cohttp_lwt_unix.Client.call `POST uri ~headers ~body >>= fun (resp, body) ->
-    Request.read_json_body_as (JsonSupport.unwrap Create_completion_response.of_yojson) resp body
+  let open Lwt in
+  let uri = Request.build_uri "/completions" in
+  let headers = Request.default_headers in
+  let body =
+    Request.write_as_json_body Create_completion_request.to_yojson
+      create_completion_request_t
+  in
+  Cohttp_lwt_unix.Client.call `POST uri ~headers ~body
+  >>= fun (resp, body) ->
+  Request.read_json_body_as
+    (JsonSupport.unwrap Create_completion_response.of_yojson)
+    resp body
 
 let create_edit ~create_edit_request_t =
-    let open Lwt in
-    let uri = Request.build_uri "/edits" in
-    let headers = Request.default_headers in
-    let body = Request.write_as_json_body Create_edit_request.to_yojson create_edit_request_t in
-    Cohttp_lwt_unix.Client.call `POST uri ~headers ~body >>= fun (resp, body) ->
-    Request.read_json_body_as (JsonSupport.unwrap Create_edit_response.of_yojson) resp body
+  let open Lwt in
+  let uri = Request.build_uri "/edits" in
+  let headers = Request.default_headers in
+  let body =
+    Request.write_as_json_body Create_edit_request.to_yojson
+      create_edit_request_t
+  in
+  Cohttp_lwt_unix.Client.call `POST uri ~headers ~body
+  >>= fun (resp, body) ->
+  Request.read_json_body_as
+    (JsonSupport.unwrap Create_edit_response.of_yojson)
+    resp body
 
 let create_embedding ~create_embedding_request_t =
-    let open Lwt in
-    let uri = Request.build_uri "/embeddings" in
-    let headers = Request.default_headers in
-    let body = Request.write_as_json_body Create_embedding_request.to_yojson create_embedding_request_t in
-    Cohttp_lwt_unix.Client.call `POST uri ~headers ~body >>= fun (resp, body) ->
-    Request.read_json_body_as (JsonSupport.unwrap Create_embedding_response.of_yojson) resp body
+  let open Lwt in
+  let uri = Request.build_uri "/embeddings" in
+  let headers = Request.default_headers in
+  let body =
+    Request.write_as_json_body Create_embedding_request.to_yojson
+      create_embedding_request_t
+  in
+  Cohttp_lwt_unix.Client.call `POST uri ~headers ~body
+  >>= fun (resp, body) ->
+  Request.read_json_body_as
+    (JsonSupport.unwrap Create_embedding_response.of_yojson)
+    resp body
 
 let create_file ~file ~purpose =
-    let open Lwt in
-    let uri = Request.build_uri "/files" in
-    let headers = Request.default_headers in
-    let body = Request.init_form_encoded_body () in
-    let body = Request.add_form_encoded_body_param body "file" (fun x -> x) file in
-    let body = Request.add_form_encoded_body_param body "purpose" (fun x -> x) purpose in
-    let body = Request.finalize_form_encoded_body body in
-    Cohttp_lwt_unix.Client.call `POST uri ~headers ~body >>= fun (resp, body) ->
-    Request.read_json_body_as (JsonSupport.unwrap Open_ai_file.of_yojson) resp body
+  let open Lwt in
+  let uri = Request.build_uri "/files" in
+  let headers = Request.default_headers in
+  let body = Request.init_form_encoded_body () in
+  let body =
+    Request.add_form_encoded_body_param body "file" (fun x -> x) file
+  in
+  let body =
+    Request.add_form_encoded_body_param body "purpose" (fun x -> x) purpose
+  in
+  let body = Request.finalize_form_encoded_body body in
+  Cohttp_lwt_unix.Client.call `POST uri ~headers ~body
+  >>= fun (resp, body) ->
+  Request.read_json_body_as
+    (JsonSupport.unwrap Open_ai_file.of_yojson)
+    resp body
 
 let create_fine_tune ~create_fine_tune_request_t =
-    let open Lwt in
-    let uri = Request.build_uri "/fine-tunes" in
-    let headers = Request.default_headers in
-    let body = Request.write_as_json_body Create_fine_tune_request.to_yojson create_fine_tune_request_t in
-    Cohttp_lwt_unix.Client.call `POST uri ~headers ~body >>= fun (resp, body) ->
-    Request.read_json_body_as (JsonSupport.unwrap Fine_tune.of_yojson) resp body
+  let open Lwt in
+  let uri = Request.build_uri "/fine-tunes" in
+  let headers = Request.default_headers in
+  let body =
+    Request.write_as_json_body Create_fine_tune_request.to_yojson
+      create_fine_tune_request_t
+  in
+  Cohttp_lwt_unix.Client.call `POST uri ~headers ~body
+  >>= fun (resp, body) ->
+  Request.read_json_body_as (JsonSupport.unwrap Fine_tune.of_yojson) resp body
 
 let create_image ~create_image_request_t =
-    let open Lwt in
-    let uri = Request.build_uri "/images/generations" in
-    let headers = Request.default_headers in
-    let body = Request.write_as_json_body Create_image_request.to_yojson create_image_request_t in
-    Cohttp_lwt_unix.Client.call `POST uri ~headers ~body >>= fun (resp, body) ->
-    Request.read_json_body_as (JsonSupport.unwrap Images_response.of_yojson) resp body
+  let open Lwt in
+  let uri = Request.build_uri "/images/generations" in
+  let headers = Request.default_headers in
+  let body =
+    Request.write_as_json_body Create_image_request.to_yojson
+      create_image_request_t
+  in
+  Cohttp_lwt_unix.Client.call `POST uri ~headers ~body
+  >>= fun (resp, body) ->
+  Request.read_json_body_as
+    (JsonSupport.unwrap Images_response.of_yojson)
+    resp body
 
 let create_image_edit ~image ~prompt ?mask () =
-    let open Lwt in
-    let uri = Request.build_uri "/images/edits" in
-    let headers = Request.default_headers in
-    let body = Request.init_form_encoded_body () in
-    let body = Request.add_form_encoded_body_param body "image" (fun x -> x) image in
-    let body = Request.maybe_add_form_encoded_body_param body "mask" (fun x -> x) mask in
-    let body = Request.add_form_encoded_body_param body "prompt" (fun x -> x) prompt in
-    let body = Request.finalize_form_encoded_body body in
-    Cohttp_lwt_unix.Client.call `POST uri ~headers ~body >>= fun (resp, body) ->
-    Request.read_json_body_as (JsonSupport.unwrap Images_response.of_yojson) resp body
+  let open Lwt in
+  let uri = Request.build_uri "/images/edits" in
+  let headers = Request.default_headers in
+  let body = Request.init_form_encoded_body () in
+  let body =
+    Request.add_form_encoded_body_param body "image" (fun x -> x) image
+  in
+  let body =
+    Request.maybe_add_form_encoded_body_param body "mask" (fun x -> x) mask
+  in
+  let body =
+    Request.add_form_encoded_body_param body "prompt" (fun x -> x) prompt
+  in
+  let body = Request.finalize_form_encoded_body body in
+  Cohttp_lwt_unix.Client.call `POST uri ~headers ~body
+  >>= fun (resp, body) ->
+  Request.read_json_body_as
+    (JsonSupport.unwrap Images_response.of_yojson)
+    resp body
 
 let create_image_variation ~image =
-    let open Lwt in
-    let uri = Request.build_uri "/images/variations" in
-    let headers = Request.default_headers in
-    let body = Request.init_form_encoded_body () in
-    let body = Request.add_form_encoded_body_param body "image" (fun x -> x) image in
-    let body = Request.finalize_form_encoded_body body in
-    Cohttp_lwt_unix.Client.call `POST uri ~headers ~body >>= fun (resp, body) ->
-    Request.read_json_body_as (JsonSupport.unwrap Images_response.of_yojson) resp body
+  let open Lwt in
+  let uri = Request.build_uri "/images/variations" in
+  let headers = Request.default_headers in
+  let body = Request.init_form_encoded_body () in
+  let body =
+    Request.add_form_encoded_body_param body "image" (fun x -> x) image
+  in
+  let body = Request.finalize_form_encoded_body body in
+  Cohttp_lwt_unix.Client.call `POST uri ~headers ~body
+  >>= fun (resp, body) ->
+  Request.read_json_body_as
+    (JsonSupport.unwrap Images_response.of_yojson)
+    resp body
 
 let create_moderation ~create_moderation_request_t =
-    let open Lwt in
-    let uri = Request.build_uri "/moderations" in
-    let headers = Request.default_headers in
-    let body = Request.write_as_json_body Create_moderation_request.to_yojson create_moderation_request_t in
-    Cohttp_lwt_unix.Client.call `POST uri ~headers ~body >>= fun (resp, body) ->
-    Request.read_json_body_as (JsonSupport.unwrap Create_moderation_response.of_yojson) resp body
+  let open Lwt in
+  let uri = Request.build_uri "/moderations" in
+  let headers = Request.default_headers in
+  let body =
+    Request.write_as_json_body Create_moderation_request.to_yojson
+      create_moderation_request_t
+  in
+  Cohttp_lwt_unix.Client.call `POST uri ~headers ~body
+  >>= fun (resp, body) ->
+  Request.read_json_body_as
+    (JsonSupport.unwrap Create_moderation_response.of_yojson)
+    resp body
 
 let create_search ~engine_id ~create_search_request_t =
-    let open Lwt in
-    let uri = Request.build_uri "/engines/{engine_id}/search" in
-    let headers = Request.default_headers in
-    let uri = Request.replace_path_param uri "engine_id" (fun x -> x) engine_id in
-    let body = Request.write_as_json_body Create_search_request.to_yojson create_search_request_t in
-    Cohttp_lwt_unix.Client.call `POST uri ~headers ~body >>= fun (resp, body) ->
-    Request.read_json_body_as (JsonSupport.unwrap Create_search_response.of_yojson) resp body
-
-let create_transcription ~file ~model ?prompt ?(response_format = "json") ?(temperature = 0.) ?language () =
-    let open Lwt in
-    let uri = Request.build_uri "/audio/transcriptions" in
-    let headers = Request.default_headers in
-    let body = Request.init_form_encoded_body () in
-    let body = Request.add_form_encoded_body_param body "file" (fun x -> x) file in
-    let body = Request.add_form_encoded_body_param body "model" (fun x -> x) model in
-    let body = Request.maybe_add_form_encoded_body_param body "prompt" (fun x -> x) prompt in
-    let body = Request.add_form_encoded_body_param body "response_format" (fun x -> x) response_format in
-    let body = Request.add_form_encoded_body_param body "temperature" string_of_float temperature in
-    let body = Request.maybe_add_form_encoded_body_param body "language" (fun x -> x) language in
-    let body = Request.finalize_form_encoded_body body in
-    Cohttp_lwt_unix.Client.call `POST uri ~headers ~body >>= fun (resp, body) ->
-    Request.read_json_body_as (JsonSupport.unwrap Create_transcription_response.of_yojson) resp body
-
-let create_translation ~file ~model ?prompt ?(response_format = "json") ?(temperature = 0.) () =
-    let open Lwt in
-    let uri = Request.build_uri "/audio/translations" in
-    let headers = Request.default_headers in
-    let body = Request.init_form_encoded_body () in
-    let body = Request.add_form_encoded_body_param body "file" (fun x -> x) file in
-    let body = Request.add_form_encoded_body_param body "model" (fun x -> x) model in
-    let body = Request.maybe_add_form_encoded_body_param body "prompt" (fun x -> x) prompt in
-    let body = Request.add_form_encoded_body_param body "response_format" (fun x -> x) response_format in
-    let body = Request.add_form_encoded_body_param body "temperature" string_of_float temperature in
-    let body = Request.finalize_form_encoded_body body in
-    Cohttp_lwt_unix.Client.call `POST uri ~headers ~body >>= fun (resp, body) ->
-    Request.read_json_body_as (JsonSupport.unwrap Create_translation_response.of_yojson) resp body
+  let open Lwt in
+  let uri = Request.build_uri "/engines/{engine_id}/search" in
+  let headers = Request.default_headers in
+  let uri = Request.replace_path_param uri "engine_id" (fun x -> x) engine_id in
+  let body =
+    Request.write_as_json_body Create_search_request.to_yojson
+      create_search_request_t
+  in
+  Cohttp_lwt_unix.Client.call `POST uri ~headers ~body
+  >>= fun (resp, body) ->
+  Request.read_json_body_as
+    (JsonSupport.unwrap Create_search_response.of_yojson)
+    resp body
+
+let create_transcription ~file ~model ?prompt ?(response_format = "json")
+    ?(temperature = 0.) ?language () =
+  let open Lwt in
+  let uri = Request.build_uri "/audio/transcriptions" in
+  let headers = Request.default_headers in
+  let body = Request.init_form_encoded_body () in
+  let body =
+    Request.add_form_encoded_body_param body "file" (fun x -> x) file
+  in
+  let body =
+    Request.add_form_encoded_body_param body "model" (fun x -> x) model
+  in
+  let body =
+    Request.maybe_add_form_encoded_body_param body "prompt" (fun x -> x) prompt
+  in
+  let body =
+    Request.add_form_encoded_body_param body "response_format"
+      (fun x -> x)
+      response_format
+  in
+  let body =
+    Request.add_form_encoded_body_param body "temperature" string_of_float
+      temperature
+  in
+  let body =
+    Request.maybe_add_form_encoded_body_param body "language"
+      (fun x -> x)
+      language
+  in
+  let body = Request.finalize_form_encoded_body body in
+  Cohttp_lwt_unix.Client.call `POST uri ~headers ~body
+  >>= fun (resp, body) ->
+  Request.read_json_body_as
+    (JsonSupport.unwrap Create_transcription_response.of_yojson)
+    resp body
+
+let create_translation ~file ~model ?prompt ?(response_format = "json")
+    ?(temperature = 0.) () =
+  let open Lwt in
+  let uri = Request.build_uri "/audio/translations" in
+  let headers = Request.default_headers in
+  let body = Request.init_form_encoded_body () in
+  let body =
+    Request.add_form_encoded_body_param body "file" (fun x -> x) file
+  in
+  let body =
+    Request.add_form_encoded_body_param body "model" (fun x -> x) model
+  in
+  let body =
+    Request.maybe_add_form_encoded_body_param body "prompt" (fun x -> x) prompt
+  in
+  let body =
+    Request.add_form_encoded_body_param body "response_format"
+      (fun x -> x)
+      response_format
+  in
+  let body =
+    Request.add_form_encoded_body_param body "temperature" string_of_float
+      temperature
+  in
+  let body = Request.finalize_form_encoded_body body in
+  Cohttp_lwt_unix.Client.call `POST uri ~headers ~body
+  >>= fun (resp, body) ->
+  Request.read_json_body_as
+    (JsonSupport.unwrap Create_translation_response.of_yojson)
+    resp body
 
 let delete_file ~file_id =
-    let open Lwt in
-    let uri = Request.build_uri "/files/{file_id}" in
-    let headers = Request.default_headers in
-    let uri = Request.replace_path_param uri "file_id" (fun x -> x) file_id in
-    Cohttp_lwt_unix.Client.call `DELETE uri ~headers >>= fun (resp, body) ->
-    Request.read_json_body_as (JsonSupport.unwrap Delete_file_response.of_yojson) resp body
+  let open Lwt in
+  let uri = Request.build_uri "/files/{file_id}" in
+  let headers = Request.default_headers in
+  let uri = Request.replace_path_param uri "file_id" (fun x -> x) file_id in
+  Cohttp_lwt_unix.Client.call `DELETE uri ~headers
+  >>= fun (resp, body) ->
+  Request.read_json_body_as
+    (JsonSupport.unwrap Delete_file_response.of_yojson)
+    resp body
 
 let delete_model ~model =
-    let open Lwt in
-    let uri = Request.build_uri "/models/{model}" in
-    let headers = Request.default_headers in
-    let uri = Request.replace_path_param uri "model" (fun x -> x) model in
-    Cohttp_lwt_unix.Client.call `DELETE uri ~headers >>= fun (resp, body) ->
-    Request.read_json_body_as (JsonSupport.unwrap Delete_model_response.of_yojson) resp body
+  let open Lwt in
+  let uri = Request.build_uri "/models/{model}" in
+  let headers = Request.default_headers in
+  let uri = Request.replace_path_param uri "model" (fun x -> x) model in
+  Cohttp_lwt_unix.Client.call `DELETE uri ~headers
+  >>= fun (resp, body) ->
+  Request.read_json_body_as
+    (JsonSupport.unwrap Delete_model_response.of_yojson)
+    resp body
 
 let download_file ~file_id =
-    let open Lwt in
-    let uri = Request.build_uri "/files/{file_id}/content" in
-    let headers = Request.default_headers in
-    let uri = Request.replace_path_param uri "file_id" (fun x -> x) file_id in
-    Cohttp_lwt_unix.Client.call `GET uri ~headers >>= fun (resp, body) ->
-    Request.read_json_body_as (JsonSupport.to_string) resp body
+  let open Lwt in
+  let uri = Request.build_uri "/files/{file_id}/content" in
+  let headers = Request.default_headers in
+  let uri = Request.replace_path_param uri "file_id" (fun x -> x) file_id in
+  Cohttp_lwt_unix.Client.call `GET uri ~headers
+  >>= fun (resp, body) ->
+  Request.read_json_body_as JsonSupport.to_string resp body
 
 let list_engines () =
-    let open Lwt in
-    let uri = Request.build_uri "/engines" in
-    let headers = Request.default_headers in
-    Cohttp_lwt_unix.Client.call `GET uri ~headers >>= fun (resp, body) ->
-    Request.read_json_body_as (JsonSupport.unwrap List_engines_response.of_yojson) resp body
+  let open Lwt in
+  let uri = Request.build_uri "/engines" in
+  let headers = Request.default_headers in
+  Cohttp_lwt_unix.Client.call `GET uri ~headers
+  >>= fun (resp, body) ->
+  Request.read_json_body_as
+    (JsonSupport.unwrap List_engines_response.of_yojson)
+    resp body
 
 let list_files () =
-    let open Lwt in
-    let uri = Request.build_uri "/files" in
-    let headers = Request.default_headers in
-    Cohttp_lwt_unix.Client.call `GET uri ~headers >>= fun (resp, body) ->
-    Request.read_json_body_as (JsonSupport.unwrap List_files_response.of_yojson) resp body
+  let open Lwt in
+  let uri = Request.build_uri "/files" in
+  let headers = Request.default_headers in
+  Cohttp_lwt_unix.Client.call `GET uri ~headers
+  >>= fun (resp, body) ->
+  Request.read_json_body_as
+    (JsonSupport.unwrap List_files_response.of_yojson)
+    resp body
 
 let list_fine_tune_events ~fine_tune_id ?(stream = false) () =
-    let open Lwt in
-    let uri = Request.build_uri "/fine-tunes/{fine_tune_id}/events" in
-    let headers = Request.default_headers in
-    let uri = Request.replace_path_param uri "fine_tune_id" (fun x -> x) fine_tune_id in
-    let uri = Request.add_query_param uri "stream" string_of_bool stream in
-    Cohttp_lwt_unix.Client.call `GET uri ~headers >>= fun (resp, body) ->
-    Request.read_json_body_as (JsonSupport.unwrap List_fine_tune_events_response.of_yojson) resp body
+  let open Lwt in
+  let uri = Request.build_uri "/fine-tunes/{fine_tune_id}/events" in
+  let headers = Request.default_headers in
+  let uri =
+    Request.replace_path_param uri "fine_tune_id" (fun x -> x) fine_tune_id
+  in
+  let uri = Request.add_query_param uri "stream" string_of_bool stream in
+  Cohttp_lwt_unix.Client.call `GET uri ~headers
+  >>= fun (resp, body) ->
+  Request.read_json_body_as
+    (JsonSupport.unwrap List_fine_tune_events_response.of_yojson)
+    resp body
 
 let list_fine_tunes () =
-    let open Lwt in
-    let uri = Request.build_uri "/fine-tunes" in
-    let headers = Request.default_headers in
-    Cohttp_lwt_unix.Client.call `GET uri ~headers >>= fun (resp, body) ->
-    Request.read_json_body_as (JsonSupport.unwrap List_fine_tunes_response.of_yojson) resp body
+  let open Lwt in
+  let uri = Request.build_uri "/fine-tunes" in
+  let headers = Request.default_headers in
+  Cohttp_lwt_unix.Client.call `GET uri ~headers
+  >>= fun (resp, body) ->
+  Request.read_json_body_as
+    (JsonSupport.unwrap List_fine_tunes_response.of_yojson)
+    resp body
 
 let list_models () =
-    let open Lwt in
-    let uri = Request.build_uri "/models" in
-    let headers = Request.default_headers in
-    Cohttp_lwt_unix.Client.call `GET uri ~headers >>= fun (resp, body) ->
-    Request.read_json_body_as (JsonSupport.unwrap List_models_response.of_yojson) resp body
+  let open Lwt in
+  let uri = Request.build_uri "/models" in
+  let headers = Request.default_headers in
+  Cohttp_lwt_unix.Client.call `GET uri ~headers
+  >>= fun (resp, body) ->
+  Request.read_json_body_as
+    (JsonSupport.unwrap List_models_response.of_yojson)
+    resp body
 
 let retrieve_engine ~engine_id =
-    let open Lwt in
-    let uri = Request.build_uri "/engines/{engine_id}" in
-    let headers = Request.default_headers in
-    let uri = Request.replace_path_param uri "engine_id" (fun x -> x) engine_id in
-    Cohttp_lwt_unix.Client.call `GET uri ~headers >>= fun (resp, body) ->
-    Request.read_json_body_as (JsonSupport.unwrap Engine.of_yojson) resp body
+  let open Lwt in
+  let uri = Request.build_uri "/engines/{engine_id}" in
+  let headers = Request.default_headers in
+  let uri = Request.replace_path_param uri "engine_id" (fun x -> x) engine_id in
+  Cohttp_lwt_unix.Client.call `GET uri ~headers
+  >>= fun (resp, body) ->
+  Request.read_json_body_as (JsonSupport.unwrap Engine.of_yojson) resp body
 
 let retrieve_file ~file_id =
-    let open Lwt in
-    let uri = Request.build_uri "/files/{file_id}" in
-    let headers = Request.default_headers in
-    let uri = Request.replace_path_param uri "file_id" (fun x -> x) file_id in
-    Cohttp_lwt_unix.Client.call `GET uri ~headers >>= fun (resp, body) ->
-    Request.read_json_body_as (JsonSupport.unwrap Open_ai_file.of_yojson) resp body
+  let open Lwt in
+  let uri = Request.build_uri "/files/{file_id}" in
+  let headers = Request.default_headers in
+  let uri = Request.replace_path_param uri "file_id" (fun x -> x) file_id in
+  Cohttp_lwt_unix.Client.call `GET uri ~headers
+  >>= fun (resp, body) ->
+  Request.read_json_body_as
+    (JsonSupport.unwrap Open_ai_file.of_yojson)
+    resp body
 
 let retrieve_fine_tune ~fine_tune_id =
-    let open Lwt in
-    let uri = Request.build_uri "/fine-tunes/{fine_tune_id}" in
-    let headers = Request.default_headers in
-    let uri = Request.replace_path_param uri "fine_tune_id" (fun x -> x) fine_tune_id in
-    Cohttp_lwt_unix.Client.call `GET uri ~headers >>= fun (resp, body) ->
-    Request.read_json_body_as (JsonSupport.unwrap Fine_tune.of_yojson) resp body
+  let open Lwt in
+  let uri = Request.build_uri "/fine-tunes/{fine_tune_id}" in
+  let headers = Request.default_headers in
+  let uri =
+    Request.replace_path_param uri "fine_tune_id" (fun x -> x) fine_tune_id
+  in
+  Cohttp_lwt_unix.Client.call `GET uri ~headers
+  >>= fun (resp, body) ->
+  Request.read_json_body_as (JsonSupport.unwrap Fine_tune.of_yojson) resp body
 
 let retrieve_model ~model =
-    let open Lwt in
-    let uri = Request.build_uri "/models/{model}" in
-    let headers = Request.default_headers in
-    let uri = Request.replace_path_param uri "model" (fun x -> x) model in
-    Cohttp_lwt_unix.Client.call `GET uri ~headers >>= fun (resp, body) ->
-    Request.read_json_body_as (JsonSupport.unwrap Model.of_yojson) resp body
-
+  let open Lwt in
+  let uri = Request.build_uri "/models/{model}" in
+  let headers = Request.default_headers in
+  let uri = Request.replace_path_param uri "model" (fun x -> x) model in
+  Cohttp_lwt_unix.Client.call `GET uri ~headers
+  >>= fun (resp, body) ->
+  Request.read_json_body_as (JsonSupport.unwrap Model.of_yojson) resp body
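Every endpoint above follows the same five-step shape: build the URI, take Request.default_headers, optionally encode a JSON or form body, make the Cohttp_lwt_unix call, then decode the body with a model's of_yojson. A hedged sketch of how the resulting promises compose, using the same >>= the bodies use; the fields of List_models_response are defined by the model modules rather than this file, so the intermediate value is deliberately ignored:

(* Chain two generated endpoints; each returns an Lwt promise, so they
   sequence with >>= just like the HTTP call and JSON decode inside
   each function above. *)
let probe () =
  let open Lwt in
  Openai.Open_ai_api.list_models () >>= fun _all_models ->
  Openai.Open_ai_api.retrieve_model ~model:"gpt-3.5-turbo"

let () = ignore (Lwt_main.run (probe ()))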
diff --git a/src/apis/open_ai_api.mli b/src/apis/open_ai_api.mli
index d563555..e5dd0e2 100644
--- a/src/apis/open_ai_api.mli
+++ b/src/apis/open_ai_api.mli
@@ -6,30 +6,99 @@
  *)
 
 val cancel_fine_tune : fine_tune_id:string -> Fine_tune.t Lwt.t
-val create_answer : create_answer_request_t:Create_answer_request.t -> Create_answer_response.t Lwt.t
-val create_chat_completion : create_chat_completion_request_t:Create_chat_completion_request.t -> Create_chat_completion_response.t Lwt.t
-val create_classification : create_classification_request_t:Create_classification_request.t -> Create_classification_response.t Lwt.t
-val create_completion : create_completion_request_t:Create_completion_request.t -> Create_completion_response.t Lwt.t
-val create_edit : create_edit_request_t:Create_edit_request.t -> Create_edit_response.t Lwt.t
-val create_embedding : create_embedding_request_t:Create_embedding_request.t -> Create_embedding_response.t Lwt.t
+
+val create_answer :
+  create_answer_request_t:Create_answer_request.t
+  -> Create_answer_response.t Lwt.t
+
+val create_chat_completion :
+  create_chat_completion_request_t:Create_chat_completion_request.t
+  -> Create_chat_completion_response.t Lwt.t
+
+val create_classification :
+  create_classification_request_t:Create_classification_request.t
+  -> Create_classification_response.t Lwt.t
+
+val create_completion :
+  create_completion_request_t:Create_completion_request.t
+  -> Create_completion_response.t Lwt.t
+
+val create_edit :
+  create_edit_request_t:Create_edit_request.t -> Create_edit_response.t Lwt.t
+
+val create_embedding :
+  create_embedding_request_t:Create_embedding_request.t
+  -> Create_embedding_response.t Lwt.t
+
 val create_file : file:string -> purpose:string -> Open_ai_file.t Lwt.t
-val create_fine_tune : create_fine_tune_request_t:Create_fine_tune_request.t -> Fine_tune.t Lwt.t
-val create_image : create_image_request_t:Create_image_request.t -> Images_response.t Lwt.t
-val create_image_edit : image:string -> prompt:string -> ?mask:string -> unit -> Images_response.t Lwt.t
+
+val create_fine_tune :
+  create_fine_tune_request_t:Create_fine_tune_request.t -> Fine_tune.t Lwt.t
+
+val create_image :
+  create_image_request_t:Create_image_request.t -> Images_response.t Lwt.t
+
+val create_image_edit :
+  image:string
+  -> prompt:string
+  -> ?mask:string
+  -> unit
+  -> Images_response.t Lwt.t
+
 val create_image_variation : image:string -> Images_response.t Lwt.t
-val create_moderation : create_moderation_request_t:Create_moderation_request.t -> Create_moderation_response.t Lwt.t
-val create_search : engine_id:string -> create_search_request_t:Create_search_request.t -> Create_search_response.t Lwt.t
-val create_transcription : file:string -> model:string -> ?prompt:string -> ?response_format:string -> ?temperature:float -> ?language:string -> unit -> Create_transcription_response.t Lwt.t
-val create_translation : file:string -> model:string -> ?prompt:string -> ?response_format:string -> ?temperature:float -> unit -> Create_translation_response.t Lwt.t
+
+val create_moderation :
+  create_moderation_request_t:Create_moderation_request.t
+  -> Create_moderation_response.t Lwt.t
+
+val create_search :
+  engine_id:string
+  -> create_search_request_t:Create_search_request.t
+  -> Create_search_response.t Lwt.t
+
+val create_transcription :
+  file:string
+  -> model:string
+  -> ?prompt:string
+  -> ?response_format:string
+  -> ?temperature:float
+  -> ?language:string
+  -> unit
+  -> Create_transcription_response.t Lwt.t
+
+val create_translation :
+  file:string
+  -> model:string
+  -> ?prompt:string
+  -> ?response_format:string
+  -> ?temperature:float
+  -> unit
+  -> Create_translation_response.t Lwt.t
+
 val delete_file : file_id:string -> Delete_file_response.t Lwt.t
+
 val delete_model : model:string -> Delete_model_response.t Lwt.t
+
 val download_file : file_id:string -> string Lwt.t
+
 val list_engines : unit -> List_engines_response.t Lwt.t
+
 val list_files : unit -> List_files_response.t Lwt.t
-val list_fine_tune_events : fine_tune_id:string -> ?stream:bool -> unit -> List_fine_tune_events_response.t Lwt.t
+
+val list_fine_tune_events :
+  fine_tune_id:string
+  -> ?stream:bool
+  -> unit
+  -> List_fine_tune_events_response.t Lwt.t
+
 val list_fine_tunes : unit -> List_fine_tunes_response.t Lwt.t
+
 val list_models : unit -> List_models_response.t Lwt.t
+
 val retrieve_engine : engine_id:string -> Engine.t Lwt.t
+
 val retrieve_file : file_id:string -> Open_ai_file.t Lwt.t
+
 val retrieve_fine_tune : fine_tune_id:string -> Fine_tune.t Lwt.t
+
 val retrieve_model : model:string -> Model.t Lwt.t
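The rewrapped signatures make one convention easier to see: every endpoint with optional parameters (create_image_edit, create_transcription, create_translation, list_fine_tune_events) ends in a unit argument, so a call is only dispatched once the trailing () is applied. A sketch under the assumption that "whisper-1" is a valid model id — the interface itself does not pin model names down:

(* Skipping the optional arguments falls back to the defaults
   ("json", 0.) declared in open_ai_api.ml above. *)
let transcribe_simple ~file =
  Openai.Open_ai_api.create_transcription ~file ~model:"whisper-1" ()

(* Or supply any subset by label before the closing (). *)
let transcribe_tuned ~file =
  Openai.Open_ai_api.create_transcription ~file ~model:"whisper-1"
    ~response_format:"verbose_json" ~temperature:0.2 ~language:"en" ()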
diff --git a/src/models/chat_completion_request_message.ml b/src/models/chat_completion_request_message.ml
index 0f26407..53364c1 100644
--- a/src/models/chat_completion_request_message.ml
+++ b/src/models/chat_completion_request_message.ml
@@ -6,10 +6,11 @@
  *)
 
 type t = {
-    (* The role of the author of this message. *)
-    role: Enums.role;
-    (* The contents of the message *)
-    content: string;
-    (* The name of the user in a multi-user chat *)
-    name: string option [@default None];
-} [@@deriving yojson { strict = false }, show, make ];;
+  (* The role of the author of this message. *)
+  role : Enums.role;
+  (* The contents of the message *)
+  content : string;
+  (* The name of the user in a multi-user chat *)
+  name : string option; [@default None]
+}
+[@@deriving yojson { strict = false }, show, make]
diff --git a/src/models/chat_completion_response_message.ml b/src/models/chat_completion_response_message.ml
index 16780d6..c84a83d 100644
--- a/src/models/chat_completion_response_message.ml
+++ b/src/models/chat_completion_response_message.ml
@@ -6,8 +6,9 @@
  *)
 
 type t = {
-    (* The role of the author of this message. *)
-    role: Enums.role;
-    (* The contents of the message *)
-    content: string;
-} [@@deriving yojson { strict = false }, show, make ];;
+  (* The role of the author of this message. *)
+  role : Enums.role;
+  (* The contents of the message *)
+  content : string;
+}
+[@@deriving yojson { strict = false }, show, make]
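Beyond whitespace, the visible change in these two records is attribute placement: ocamlformat moves [@default None] after the semicolon, the position the ppx rewriters expect for a field attribute, so behavior should be unchanged — name stays optional both in make and in the JSON codecs. A small sketch under that assumption:

(* role and content are required; name defaults to None via [@default]. *)
let msg =
  Openai.Chat_completion_request_message.make ~role:`User ~content:"hi" ()

(* to_yojson comes from [@@deriving yojson]; exactly how the None field is
   rendered is up to ppx_deriving_yojson, not something this diff changes. *)
let () =
  print_endline
    (Yojson.Safe.to_string (Openai.Chat_completion_request_message.to_yojson msg))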
diff --git a/src/models/create_answer_request.ml b/src/models/create_answer_request.ml
index b998c48..1a2997b 100644
--- a/src/models/create_answer_request.ml
+++ b/src/models/create_answer_request.ml
@@ -6,34 +6,35 @@
  *)
 
 type t = {
-    (* ID of the model to use for completion. You can select one of `ada`, `babbage`, `curie`, or `davinci`. *)
-    model: string;
-    (* Question to get answered. *)
-    question: string;
-    (* List of (question, answer) pairs that will help steer the model towards the tone and answer format you'd like. We recommend adding 2 to 3 examples. *)
-    examples: string list list;
-    (* A text snippet containing the contextual information used to generate the answers for the `examples` you provide. *)
-    examples_context: string;
-    (* List of documents from which the answer for the input `question` should be derived. If this is an empty list, the question will be answered based on the question-answer examples. You should specify either `documents` or a `file`, but not both. *)
-    documents: string list;
-    (* The ID of an uploaded file that contains documents to search over. See [upload file](/docs/api-reference/files/upload) for how to upload a file of the desired format and purpose. You should specify either `documents` or a `file`, but not both. *)
-    file: string option [@default None];
-    (* ID of the model to use for [Search](/docs/api-reference/searches/create). You can select one of `ada`, `babbage`, `curie`, or `davinci`. *)
-    search_model: string option [@default None];
-    (* The maximum number of documents to be ranked by [Search](/docs/api-reference/searches/create) when using `file`. Setting it to a higher value leads to improved accuracy but with increased latency and cost. *)
-    max_rerank: int32 option [@default None];
-    (* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. *)
-    temperature: float option [@default None];
-    (* Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. If you need more than this, please contact us through our [Help center](https://help.openai.com) and describe your use case. When `logprobs` is set, `completion` will be automatically added into `expand` to get the logprobs. *)
-    logprobs: int32 option [@default None];
-    (* The maximum number of tokens allowed for the generated answer *)
-    max_tokens: int32 option [@default None];
-    (* Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. *)
-    stop: string array option [@default None];
-    (* How many answers to generate for each question. *)
-    n: int32 option [@default None];
-    (* If set to `true`, the returned JSON will include a \''prompt\'' field containing the final prompt that was used to request a completion. This is mainly useful for debugging purposes. *)
-    return_prompt: bool option [@default None];
-    (* If an object name is in the list, we provide the full information of the object; otherwise, we only provide the object ID. Currently we support `completion` and `file` objects for expansion. *)
-    expand: Yojson.Safe.t list;
-} [@@deriving yojson { strict = false }, show, make ];;
+  (* ID of the model to use for completion. You can select one of `ada`, `babbage`, `curie`, or `davinci`. *)
+  model : string;
+  (* Question to get answered. *)
+  question : string;
+  (* List of (question, answer) pairs that will help steer the model towards the tone and answer format you'd like. We recommend adding 2 to 3 examples. *)
+  examples : string list list;
+  (* A text snippet containing the contextual information used to generate the answers for the `examples` you provide. *)
+  examples_context : string;
+  (* List of documents from which the answer for the input `question` should be derived. If this is an empty list, the question will be answered based on the question-answer examples. You should specify either `documents` or a `file`, but not both. *)
+  documents : string list;
+  (* The ID of an uploaded file that contains documents to search over. See [upload file](/docs/api-reference/files/upload) for how to upload a file of the desired format and purpose. You should specify either `documents` or a `file`, but not both. *)
+  file : string option; [@default None]
+  (* ID of the model to use for [Search](/docs/api-reference/searches/create). You can select one of `ada`, `babbage`, `curie`, or `davinci`. *)
+  search_model : string option; [@default None]
+  (* The maximum number of documents to be ranked by [Search](/docs/api-reference/searches/create) when using `file`. Setting it to a higher value leads to improved accuracy but with increased latency and cost. *)
+  max_rerank : int32 option; [@default None]
+  (* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. *)
+  temperature : float option; [@default None]
+  (* Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. If you need more than this, please contact us through our [Help center](https://help.openai.com) and describe your use case. When `logprobs` is set, `completion` will be automatically added into `expand` to get the logprobs. *)
+  logprobs : int32 option; [@default None]
+  (* The maximum number of tokens allowed for the generated answer *)
+  max_tokens : int32 option; [@default None]
+  (* Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. *)
+  stop : string array option; [@default None]
+  (* How many answers to generate for each question. *)
+  n : int32 option; [@default None]
+  (* If set to `true`, the returned JSON will include a \''prompt\'' field containing the final prompt that was used to request a completion. This is mainly useful for debugging purposes. *)
+  return_prompt : bool option; [@default None]
+  (* If an object name is in the list, we provide the full information of the object; otherwise, we only provide the object ID. Currently we support `completion` and `file` objects for expansion. *)
+  expand : Yojson.Safe.t list;
+}
+[@@deriving yojson { strict = false }, show, make]
diff --git a/src/models/create_answer_response.ml b/src/models/create_answer_response.ml
index eb065b2..21fda60 100644
--- a/src/models/create_answer_response.ml
+++ b/src/models/create_answer_response.ml
@@ -6,10 +6,11 @@
  *)
 
 type t = {
-    _object: string option [@default None] [@key "object"];
-    model: string option [@default None];
-    search_model: string option [@default None];
-    completion: string option [@default None];
-    answers: string list;
-    selected_documents: Create_answer_response_selected_documents.t list;
-} [@@deriving yojson { strict = false }, show, make ];;
+  _object : string option; [@default None] [@key "object"]
+  model : string option; [@default None]
+  search_model : string option; [@default None]
+  completion : string option; [@default None]
+  answers : string list;
+  selected_documents : Create_answer_response_selected_documents.t list;
+}
+[@@deriving yojson { strict = false }, show, make]
diff --git a/src/models/create_answer_response_selected_documents.ml b/src/models/create_answer_response_selected_documents.ml
index 3eb3385..60bf526 100644
--- a/src/models/create_answer_response_selected_documents.ml
+++ b/src/models/create_answer_response_selected_documents.ml
@@ -6,6 +6,7 @@
  *)
 
 type t = {
-    document: int32 option [@default None];
-    text: string option [@default None];
-} [@@deriving yojson { strict = false }, show, make ];;
+  document : int32 option; [@default None]
+  text : string option; [@default None]
+}
+[@@deriving yojson { strict = false }, show, make]
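Two attributes in the records above do real work: [@key "object"] maps the wire name object — a reserved word in OCaml — onto the _object field, and { strict = false } tells the derived of_yojson to ignore fields the type does not model. A sketch with an invented payload shaped to match Create_answer_response.t:

let sample : Yojson.Safe.t =
  Yojson.Safe.from_string
    {|{"object":"answer","answers":["42"],"selected_documents":[],"extra":1}|}

(* "object" lands in _object; "extra" is dropped thanks to strict = false. *)
let () =
  match Openai.Create_answer_response.of_yojson sample with
  | Ok response -> print_endline (Openai.Create_answer_response.show response)
  | Error location -> prerr_endline ("decode failed at: " ^ location)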
diff --git a/src/models/create_chat_completion_request.ml b/src/models/create_chat_completion_request.ml
index 3af5864..88d092a 100644
--- a/src/models/create_chat_completion_request.ml
+++ b/src/models/create_chat_completion_request.ml
@@ -6,26 +6,27 @@
  *)
 
 type t = {
-    (* ID of the model to use. Currently, only `gpt-3.5-turbo` and `gpt-3.5-turbo-0301` are supported. *)
-    model: string;
-    (* The messages to generate chat completions for, in the [chat format](/docs/guides/chat/introduction). *)
-    messages: Chat_completion_request_message.t list;
-    (* completions_temperature_description *)
-    temperature: float option [@default None];
-    (* completions_top_p_description *)
-    top_p: float option [@default None];
-    (* How many chat completion choices to generate for each input message. *)
-    n: int32 option [@default None];
-    (* If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. *)
-    stream: bool option [@default None];
-    (* Up to 4 sequences where the API will stop generating further tokens. *)
-    stop: string array option [@default None];
-    (* The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens). *)
-    max_tokens: int32 option [@default None];
-    (* completions_presence_penalty_description *)
-    presence_penalty: float option [@default None];
-    (* completions_frequency_penalty_description *)
-    frequency_penalty: float option [@default None];
-    (* Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. *)
-    logit_bias: Yojson.Safe.t option [@default None];
-} [@@deriving yojson { strict = false }, show, make ];;
+  (* ID of the model to use. Currently, only `gpt-3.5-turbo` and `gpt-3.5-turbo-0301` are supported. *)
+  model : string;
+  (* The messages to generate chat completions for, in the [chat format](/docs/guides/chat/introduction). *)
+  messages : Chat_completion_request_message.t list;
+  (* completions_temperature_description *)
+  temperature : float option; [@default None]
+  (* completions_top_p_description *)
+  top_p : float option; [@default None]
+  (* How many chat completion choices to generate for each input message. *)
+  n : int32 option; [@default None]
+  (* If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. *)
+  stream : bool option; [@default None]
+  (* Up to 4 sequences where the API will stop generating further tokens. *)
+  stop : string array option; [@default None]
+  (* The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens). *)
+  max_tokens : int32 option; [@default None]
+  (* completions_presence_penalty_description *)
+  presence_penalty : float option; [@default None]
+  (* completions_frequency_penalty_description *)
+  frequency_penalty : float option; [@default None]
+  (* Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. *)
+  logit_bias : Yojson.Safe.t option; [@default None]
+}
+[@@deriving yojson { strict = false }, show, make]
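logit_bias is the one field the generator leaves as raw Yojson.Safe.t, so callers assemble the token-id-to-bias object by hand. An illustrative sketch; the id 50256 for <|endoftext|> is taken from the create_completion_request.ml comment later in this diff and may not be meaningful for every model:

(* A bias of -100 effectively bans a token, per the comment above. *)
let no_endoftext : Yojson.Safe.t = `Assoc [ ("50256", `Int (-100)) ]

let biased_request messages =
  let req =
    Openai.Create_chat_completion_request.make ~model:"gpt-3.5-turbo"
      ~messages ()
  in
  { req with logit_bias = Some no_endoftext }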
*) + query : string; + (* A list of examples with labels, in the following format: `[[\''The movie is so interesting.\'', \''Positive\''], [\''It is quite boring.\'', \''Negative\''], ...]` All the label strings will be normalized to be capitalized. You should specify either `examples` or `file`, but not both. *) + examples : string list list; + (* The ID of the uploaded file that contains training examples. See [upload file](/docs/api-reference/files/upload) for how to upload a file of the desired format and purpose. You should specify either `examples` or `file`, but not both. *) + file : string option; [@default None] + (* The set of categories being classified. If not specified, candidate labels will be automatically collected from the examples you provide. All the label strings will be normalized to be capitalized. *) + labels : string list; + (* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. *) + temperature : float option; [@default None] + (* The maximum number of examples to be ranked by [Search](/docs/api-reference/searches/create) when using `file`. Setting it to a higher value leads to improved accuracy but with increased latency and cost. *) + max_examples : int32 option; [@default None] +} +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/create_classification_response.ml b/src/models/create_classification_response.ml index 27c850f..2262ff1 100644 --- a/src/models/create_classification_response.ml +++ b/src/models/create_classification_response.ml @@ -6,10 +6,11 @@ *) type t = { - _object: string option [@default None] [@key "object"]; - model: string option [@default None]; - search_model: string option [@default None]; - completion: string option [@default None]; - label: string option [@default None]; - selected_examples: Create_classification_response_selected_examples.t list; -} [@@deriving yojson { strict = false }, show, make ];; + _object : string option; [@default None] [@key "object"] + model : string option; [@default None] + search_model : string option; [@default None] + completion : string option; [@default None] + label : string option; [@default None] + selected_examples : Create_classification_response_selected_examples.t list; +} +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/create_classification_response_selected_examples.ml b/src/models/create_classification_response_selected_examples.ml index 9b71bc9..605a2d1 100644 --- a/src/models/create_classification_response_selected_examples.ml +++ b/src/models/create_classification_response_selected_examples.ml @@ -6,7 +6,8 @@ *) type t = { - document: int32 option [@default None]; - text: string option [@default None]; - label: string option [@default None]; -} [@@deriving yojson { strict = false }, show, make ];; + document : int32 option; [@default None] + text : string option; [@default None] + label : string option; [@default None] +} +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/create_completion_request.ml b/src/models/create_completion_request.ml index 5350122..c78eab8 100644 --- a/src/models/create_completion_request.ml +++ b/src/models/create_completion_request.ml @@ -6,40 +6,42 @@ *) type t = { - (* ID of the model to use. 
You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. *) - model: string; - (** + (* ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. *) + model : string; + (** * The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. * * Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. *) - prompt: MultiTypes.StringOrStringArrayOrIntArrayOrIntArrayArray.t option [@default None]; - (* The suffix that comes after a completion of inserted text. *) - suffix: string option [@default None]; - (* The maximum number of [tokens](/tokenizer) to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. Most models have a context length of 2048 tokens (except for the newest models, which support 4096). *) - max_tokens: int32 option [@default None]; - (* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. *) - temperature: float option [@default None]; - (* An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. *) - top_p: float option [@default None]; - (* How many completions to generate for each prompt. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. *) - n: int32 option [@default None]; - (* Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. *) - stream: bool option [@default None]; - (* Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. If you need more than this, please contact us through our [Help center](https://help.openai.com) and describe your use case. *) - logprobs: int32 option [@default None]; - (* Echo back the prompt in addition to the completion *) - echo: bool option [@default None]; - (* Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. *) - stop: string array option [@default None]; - (* Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. 
[See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) *) - presence_penalty: float option [@default None]; - (* Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) *) - frequency_penalty: float option [@default None]; - (* Generates `best_of` completions server-side and returns the \''best\'' (the one with the highest log probability per token). Results cannot be streamed. When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. *) - best_of: int32 option [@default None]; - (* Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. As an example, you can pass `{\''50256\'': -100}` to prevent the <|endoftext|> token from being generated. *) - logit_bias: Yojson.Safe.t option [@default None]; - (* A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). *) - user: string option [@default None]; -} [@@deriving yojson { strict = false }, show, make ];; + prompt : MultiTypes.StringOrStringArrayOrIntArrayOrIntArrayArray.t option; + [@default None] + (* The suffix that comes after a completion of inserted text. *) + suffix : string option; [@default None] + (* The maximum number of [tokens](/tokenizer) to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. Most models have a context length of 2048 tokens (except for the newest models, which support 4096). *) + max_tokens : int32 option; [@default None] + (* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. *) + temperature : float option; [@default None] + (* An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. *) + top_p : float option; [@default None] + (* How many completions to generate for each prompt. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. 
*)
+ n : int32 option; [@default None]
+ (* Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. *)
+ stream : bool option; [@default None]
+ (* Include the log probabilities on the `logprobs` most likely tokens, as well as the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. If you need more than this, please contact us through our [Help center](https://help.openai.com) and describe your use case. *)
+ logprobs : int32 option; [@default None]
+ (* Echo back the prompt in addition to the completion *)
+ echo : bool option; [@default None]
+ (* Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. *)
+ stop : string array option; [@default None]
+ (* Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) *)
+ presence_penalty : float option; [@default None]
+ (* Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) *)
+ frequency_penalty : float option; [@default None]
+ (* Generates `best_of` completions server-side and returns the \''best\'' (the one with the highest log probability per token). Results cannot be streamed. When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. *)
+ best_of : int32 option; [@default None]
+ (* Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. As an example, you can pass `{\''50256\'': -100}` to prevent the <|endoftext|> token from being generated. *)
+ logit_bias : Yojson.Safe.t option; [@default None]
+ (* A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). 
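As a hedged sketch of how this request type might be used: `make` below is the ppx_deriving-generated constructor, the model name and bias value are placeholders, and the optional fields are filled in by record update so the sketch only relies on `model` being the sole required field.

  let completion_req =
    let base =
      Openai.Create_completion_request.make ~model:"text-davinci-003" ()
    in
    {
      base with
      prompt = Some (`String "Write a haiku about OCaml.");
      max_tokens = Some 64l;
      n = Some 1l;
      (* Ban <|endoftext|> (token 50256), as described for logit_bias above. *)
      logit_bias = Some (`Assoc [ ("50256", `Int (-100)) ]);
      user = Some "example-user-id";
    }
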
*) + user : string option; [@default None] +} +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/create_completion_response.ml b/src/models/create_completion_response.ml index 295e019..0587e6a 100644 --- a/src/models/create_completion_response.ml +++ b/src/models/create_completion_response.ml @@ -6,10 +6,11 @@ *) type t = { - id: string; - _object: string [@key "object"]; - created: int32; - model: string; - choices: Create_completion_response_choices.t list; - usage: Create_completion_response_usage.t option [@default None]; -} [@@deriving yojson { strict = false }, show, make ];; + id : string; + _object : string; [@key "object"] + created : int32; + model : string; + choices : Create_completion_response_choices.t list; + usage : Create_completion_response_usage.t option; [@default None] +} +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/create_completion_response_choices.ml b/src/models/create_completion_response_choices.ml index 297971e..0f3534f 100644 --- a/src/models/create_completion_response_choices.ml +++ b/src/models/create_completion_response_choices.ml @@ -6,8 +6,9 @@ *) type t = { - text: string option [@default None]; - index: int32 option [@default None]; - logprobs: Create_completion_response_logprobs.t option [@default None]; - finish_reason: string option [@default None]; -} [@@deriving yojson { strict = false }, show, make ];; + text : string option; [@default None] + index : int32 option; [@default None] + logprobs : Create_completion_response_logprobs.t option; [@default None] + finish_reason : string option; [@default None] +} +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/create_completion_response_logprobs.ml b/src/models/create_completion_response_logprobs.ml index 36277c8..bd0b1b6 100644 --- a/src/models/create_completion_response_logprobs.ml +++ b/src/models/create_completion_response_logprobs.ml @@ -6,8 +6,9 @@ *) type t = { - tokens: string list; - token_logprobs: float list; - top_logprobs: Yojson.Safe.t list; - text_offset: int32 list; -} [@@deriving yojson { strict = false }, show, make ];; + tokens : string list; + token_logprobs : float list; + top_logprobs : Yojson.Safe.t list; + text_offset : int32 list; +} +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/create_completion_response_usage.ml b/src/models/create_completion_response_usage.ml index abd68b3..b952923 100644 --- a/src/models/create_completion_response_usage.ml +++ b/src/models/create_completion_response_usage.ml @@ -6,7 +6,8 @@ *) type t = { - prompt_tokens: int32; - completion_tokens: int32; - total_tokens: int32; -} [@@deriving yojson { strict = false }, show, make ];; + prompt_tokens : int32; + completion_tokens : int32; + total_tokens : int32; +} +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/create_edit_request.ml b/src/models/create_edit_request.ml index 4929a40..c7679b2 100644 --- a/src/models/create_edit_request.ml +++ b/src/models/create_edit_request.ml @@ -6,16 +6,17 @@ *) type t = { - (* ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` model with this endpoint. *) - model: string; - (* The input text to use as a starting point for the edit. *) - input: string option [@default None]; - (* The instruction that tells the model how to edit the prompt. *) - instruction: string; - (* How many edits to generate for the input and instruction. 
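In the same hedged spirit, an edit request: the model name comes from the comment above, `model` and `instruction` are the two non-optional fields, and the sample text is invented.

  let edit_req =
    let base =
      Openai.Create_edit_request.make ~model:"text-davinci-edit-001"
        ~instruction:"Fix the spelling mistakes." ()
    in
    { base with input = Some "What day of the wek is it?" }
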
*) - n: int32 option [@default None]; - (* completions_temperature_description *) - temperature: float option [@default None]; - (* completions_top_p_description *) - top_p: float option [@default None]; -} [@@deriving yojson { strict = false }, show, make ];; + (* ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` model with this endpoint. *) + model : string; + (* The input text to use as a starting point for the edit. *) + input : string option; [@default None] + (* The instruction that tells the model how to edit the prompt. *) + instruction : string; + (* How many edits to generate for the input and instruction. *) + n : int32 option; [@default None] + (* completions_temperature_description *) + temperature : float option; [@default None] + (* completions_top_p_description *) + top_p : float option; [@default None] +} +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/create_edit_response.ml b/src/models/create_edit_response.ml index 84cc4b4..42b8e7d 100644 --- a/src/models/create_edit_response.ml +++ b/src/models/create_edit_response.ml @@ -6,8 +6,9 @@ *) type t = { - _object: string [@key "object"]; - created: int32; - choices: Create_completion_response_choices.t list; - usage: Create_completion_response_usage.t; -} [@@deriving yojson { strict = false }, show, make ];; + _object : string; [@key "object"] + created : int32; + choices : Create_completion_response_choices.t list; + usage : Create_completion_response_usage.t; +} +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/create_embedding_request.ml b/src/models/create_embedding_request.ml index 1c75469..e11ec7a 100644 --- a/src/models/create_embedding_request.ml +++ b/src/models/create_embedding_request.ml @@ -6,6 +6,7 @@ *) type t = { - (* Input text to get embeddings for, encoded as a string, array of strings, array of tokens, or array of token arrays. Each input must not exceed 8192 tokens in length. *) - input: MultiTypes.StringOrStringArrayOrIntArrayOrIntArrayArray.t; -} [@@deriving yojson { strict = false }, show, make ];; + (* Input text to get embeddings for, encoded as a string, array of strings, array of tokens, or array of token arrays. Each input must not exceed 8192 tokens in length. 
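Because `input` is the multi-type defined in src/support/multiTypes.ml further down, all four encodings are accepted. A sketch using record literals (the token IDs are made up for illustration):

  let by_string : Openai.Create_embedding_request.t =
    { input = `String "hello world" }

  let by_batch : Openai.Create_embedding_request.t =
    { input = `StringArray [| "first text"; "second text" |] }

  let by_tokens : Openai.Create_embedding_request.t =
    { input = `IntArray [| 31373; 995 |] }
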
*) + input : MultiTypes.StringOrStringArrayOrIntArrayOrIntArrayArray.t; +} +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/create_embedding_response.ml b/src/models/create_embedding_response.ml index bb1a412..e664deb 100644 --- a/src/models/create_embedding_response.ml +++ b/src/models/create_embedding_response.ml @@ -6,8 +6,9 @@ *) type t = { - _object: string [@key "object"]; - model: string; - data: Create_embedding_response_data.t list; - usage: Create_embedding_response_usage.t; -} [@@deriving yojson { strict = false }, show, make ];; + _object : string; [@key "object"] + model : string; + data : Create_embedding_response_data.t list; + usage : Create_embedding_response_usage.t; +} +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/create_embedding_response_data.ml b/src/models/create_embedding_response_data.ml index 8d55368..3bd24b9 100644 --- a/src/models/create_embedding_response_data.ml +++ b/src/models/create_embedding_response_data.ml @@ -6,7 +6,8 @@ *) type t = { - index: int32; - _object: string [@key "object"]; - embedding: float list; -} [@@deriving yojson { strict = false }, show, make ];; + index : int32; + _object : string; [@key "object"] + embedding : float list; +} +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/create_embedding_response_usage.ml b/src/models/create_embedding_response_usage.ml index 06da374..5747adc 100644 --- a/src/models/create_embedding_response_usage.ml +++ b/src/models/create_embedding_response_usage.ml @@ -5,7 +5,5 @@ * *) -type t = { - prompt_tokens: int32; - total_tokens: int32; -} [@@deriving yojson { strict = false }, show, make ];; +type t = { prompt_tokens : int32; total_tokens : int32 } +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/create_file_request.ml b/src/models/create_file_request.ml index a8526d3..ce03f69 100644 --- a/src/models/create_file_request.ml +++ b/src/models/create_file_request.ml @@ -6,8 +6,9 @@ *) type t = { - (* Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. If the `purpose` is set to \''fine-tune\'', each line is a JSON record with \''prompt\'' and \''completion\'' fields representing your [training examples](/docs/guides/fine-tuning/prepare-training-data). *) - file: string; - (* The intended purpose of the uploaded documents. Use \''fine-tune\'' for [Fine-tuning](/docs/api-reference/fine-tunes). This allows us to validate the format of the uploaded file. *) - purpose: string; -} [@@deriving yojson { strict = false }, show, make ];; + (* Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. If the `purpose` is set to \''fine-tune\'', each line is a JSON record with \''prompt\'' and \''completion\'' fields representing your [training examples](/docs/guides/fine-tuning/prepare-training-data). *) + file : string; + (* The intended purpose of the uploaded documents. Use \''fine-tune\'' for [Fine-tuning](/docs/api-reference/fine-tunes). This allows us to validate the format of the uploaded file. *) + purpose : string; +} +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/create_fine_tune_request.ml b/src/models/create_fine_tune_request.ml index 283d73b..1b2fe26 100644 --- a/src/models/create_fine_tune_request.ml +++ b/src/models/create_fine_tune_request.ml @@ -6,28 +6,29 @@ *) type t = { - (* The ID of an uploaded file that contains training data. 
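The upload request defined just above pairs naturally with fine-tuning. A sketch with a placeholder filename, written as a record literal since both fields are required:

  let upload_req : Openai.Create_file_request.t =
    { file = "training_data.jsonl"; purpose = "fine-tune" }
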
See [upload file](/docs/api-reference/files/upload) for how to upload a file. Your dataset must be formatted as a JSONL file, where each training example is a JSON object with the keys \''prompt\'' and \''completion\''. Additionally, you must upload your file with the purpose `fine-tune`. See the [fine-tuning guide](/docs/guides/fine-tuning/creating-training-data) for more details. *) - training_file: string; - (* The ID of an uploaded file that contains validation data. If you provide this file, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in the [fine-tuning results file](/docs/guides/fine-tuning/analyzing-your-fine-tuned-model). Your train and validation data should be mutually exclusive. Your dataset must be formatted as a JSONL file, where each validation example is a JSON object with the keys \''prompt\'' and \''completion\''. Additionally, you must upload your file with the purpose `fine-tune`. See the [fine-tuning guide](/docs/guides/fine-tuning/creating-training-data) for more details. *) - validation_file: string option [@default None]; - (* The name of the base model to fine-tune. You can select one of \''ada\'', \''babbage\'', \''curie\'', \''davinci\'', or a fine-tuned model created after 2022-04-21. To learn more about these models, see the [Models](https://platform.openai.com/docs/models) documentation. *) - model: string option [@default None]; - (* The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. *) - n_epochs: int32 option [@default None]; - (* The batch size to use for training. The batch size is the number of training examples used to train a single forward and backward pass. By default, the batch size will be dynamically configured to be ~0.2% of the number of examples in the training set, capped at 256 - in general, we've found that larger batch sizes tend to work better for larger datasets. *) - batch_size: int32 option [@default None]; - (* The learning rate multiplier to use for training. The fine-tuning learning rate is the original learning rate used for pretraining multiplied by this value. By default, the learning rate multiplier is the 0.05, 0.1, or 0.2 depending on final `batch_size` (larger learning rates tend to perform better with larger batch sizes). We recommend experimenting with values in the range 0.02 to 0.2 to see what produces the best results. *) - learning_rate_multiplier: float option [@default None]; - (* The weight to use for loss on the prompt tokens. This controls how much the model tries to learn to generate the prompt (as compared to the completion which always has a weight of 1.0), and can add a stabilizing effect to training when completions are short. If prompts are extremely long (relative to completions), it may make sense to reduce this weight so as to avoid over-prioritizing learning the prompt. *) - prompt_loss_weight: float option [@default None]; - (* If set, we calculate classification-specific metrics such as accuracy and F-1 score using the validation set at the end of every epoch. These metrics can be viewed in the [results file](/docs/guides/fine-tuning/analyzing-your-fine-tuned-model). In order to compute classification metrics, you must provide a `validation_file`. Additionally, you must specify `classification_n_classes` for multiclass classification or `classification_positive_class` for binary classification. 
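A sketch of enabling those classification metrics on a fine-tune: the file IDs are placeholders, and the sketch assumes ppx_deriving's `make` leaves option- and list-typed fields (such as `classification_betas`) as optional arguments.

  let ft_req =
    let base =
      Openai.Create_fine_tune_request.make ~training_file:"file-abc123" ()
    in
    {
      base with
      validation_file = Some "file-def456";
      compute_classification_metrics = Some true;
      classification_positive_class = Some "Positive";
    }
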
*) - compute_classification_metrics: bool option [@default None]; - (* The number of classes in a classification task. This parameter is required for multiclass classification. *) - classification_n_classes: int32 option [@default None]; - (* The positive class in binary classification. This parameter is needed to generate precision, recall, and F1 metrics when doing binary classification. *) - classification_positive_class: string option [@default None]; - (* If this is provided, we calculate F-beta scores at the specified beta values. The F-beta score is a generalization of F-1 score. This is only used for binary classification. With a beta of 1 (i.e. the F-1 score), precision and recall are given the same weight. A larger beta score puts more weight on recall and less on precision. A smaller beta score puts more weight on precision and less on recall. *) - classification_betas: float list; - (* A string of up to 40 characters that will be added to your fine-tuned model name. For example, a `suffix` of \''custom-model-name\'' would produce a model name like `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`. *) - suffix: string option [@default None]; -} [@@deriving yojson { strict = false }, show, make ];; + (* The ID of an uploaded file that contains training data. See [upload file](/docs/api-reference/files/upload) for how to upload a file. Your dataset must be formatted as a JSONL file, where each training example is a JSON object with the keys \''prompt\'' and \''completion\''. Additionally, you must upload your file with the purpose `fine-tune`. See the [fine-tuning guide](/docs/guides/fine-tuning/creating-training-data) for more details. *) + training_file : string; + (* The ID of an uploaded file that contains validation data. If you provide this file, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in the [fine-tuning results file](/docs/guides/fine-tuning/analyzing-your-fine-tuned-model). Your train and validation data should be mutually exclusive. Your dataset must be formatted as a JSONL file, where each validation example is a JSON object with the keys \''prompt\'' and \''completion\''. Additionally, you must upload your file with the purpose `fine-tune`. See the [fine-tuning guide](/docs/guides/fine-tuning/creating-training-data) for more details. *) + validation_file : string option; [@default None] + (* The name of the base model to fine-tune. You can select one of \''ada\'', \''babbage\'', \''curie\'', \''davinci\'', or a fine-tuned model created after 2022-04-21. To learn more about these models, see the [Models](https://platform.openai.com/docs/models) documentation. *) + model : string option; [@default None] + (* The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. *) + n_epochs : int32 option; [@default None] + (* The batch size to use for training. The batch size is the number of training examples used to train a single forward and backward pass. By default, the batch size will be dynamically configured to be ~0.2% of the number of examples in the training set, capped at 256 - in general, we've found that larger batch sizes tend to work better for larger datasets. *) + batch_size : int32 option; [@default None] + (* The learning rate multiplier to use for training. The fine-tuning learning rate is the original learning rate used for pretraining multiplied by this value. 
By default, the learning rate multiplier is the 0.05, 0.1, or 0.2 depending on final `batch_size` (larger learning rates tend to perform better with larger batch sizes). We recommend experimenting with values in the range 0.02 to 0.2 to see what produces the best results. *) + learning_rate_multiplier : float option; [@default None] + (* The weight to use for loss on the prompt tokens. This controls how much the model tries to learn to generate the prompt (as compared to the completion which always has a weight of 1.0), and can add a stabilizing effect to training when completions are short. If prompts are extremely long (relative to completions), it may make sense to reduce this weight so as to avoid over-prioritizing learning the prompt. *) + prompt_loss_weight : float option; [@default None] + (* If set, we calculate classification-specific metrics such as accuracy and F-1 score using the validation set at the end of every epoch. These metrics can be viewed in the [results file](/docs/guides/fine-tuning/analyzing-your-fine-tuned-model). In order to compute classification metrics, you must provide a `validation_file`. Additionally, you must specify `classification_n_classes` for multiclass classification or `classification_positive_class` for binary classification. *) + compute_classification_metrics : bool option; [@default None] + (* The number of classes in a classification task. This parameter is required for multiclass classification. *) + classification_n_classes : int32 option; [@default None] + (* The positive class in binary classification. This parameter is needed to generate precision, recall, and F1 metrics when doing binary classification. *) + classification_positive_class : string option; [@default None] + (* If this is provided, we calculate F-beta scores at the specified beta values. The F-beta score is a generalization of F-1 score. This is only used for binary classification. With a beta of 1 (i.e. the F-1 score), precision and recall are given the same weight. A larger beta score puts more weight on recall and less on precision. A smaller beta score puts more weight on precision and less on recall. *) + classification_betas : float list; + (* A string of up to 40 characters that will be added to your fine-tuned model name. For example, a `suffix` of \''custom-model-name\'' would produce a model name like `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`. *) + suffix : string option; [@default None] +} +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/create_image_edit_request.ml b/src/models/create_image_edit_request.ml index 4c860ed..bb6b99e 100644 --- a/src/models/create_image_edit_request.ml +++ b/src/models/create_image_edit_request.ml @@ -6,10 +6,11 @@ *) type t = { - (* The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask. *) - image: string; - (* An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`. *) - mask: string option [@default None]; - (* A text description of the desired image(s). The maximum length is 1000 characters. *) - prompt: string; -} [@@deriving yojson { strict = false }, show, make ];; + (* The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask. 
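An image-edit sketch: the PNG paths are placeholders, `image` and `prompt` are the required fields, and the optional `mask` is attached by record update.

  let image_edit_req =
    let base =
      Openai.Create_image_edit_request.make ~image:"otter.png"
        ~prompt:"Add a red knitted hat" ()
    in
    { base with mask = Some "otter_mask.png" }
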
*) + image : string; + (* An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`. *) + mask : string option; [@default None] + (* A text description of the desired image(s). The maximum length is 1000 characters. *) + prompt : string; +} +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/create_image_request.ml b/src/models/create_image_request.ml index 45b686c..798cb94 100644 --- a/src/models/create_image_request.ml +++ b/src/models/create_image_request.ml @@ -6,12 +6,13 @@ *) type t = { - (* A text description of the desired image(s). The maximum length is 1000 characters. *) - prompt: string; - (* The number of images to generate. Must be between 1 and 10. *) - n: int32 option [@default None]; - (* The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. *) - size: Enums.size option [@default Some(`_1024x1024)]; - (* The format in which the generated images are returned. Must be one of `url` or `b64_json`. *) - response_format: Enums.response_format option [@default Some(`Url)]; -} [@@deriving yojson { strict = false }, show, make ];; + (* A text description of the desired image(s). The maximum length is 1000 characters. *) + prompt : string; + (* The number of images to generate. Must be between 1 and 10. *) + n : int32 option; [@default None] + (* The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. *) + size : Enums.size option; [@default Some `_1024x1024] + (* The format in which the generated images are returned. Must be one of `url` or `b64_json`. *) + response_format : Enums.response_format option; [@default Some `Url] +} +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/create_image_variation_request.ml b/src/models/create_image_variation_request.ml index 875f8de..2531e23 100644 --- a/src/models/create_image_variation_request.ml +++ b/src/models/create_image_variation_request.ml @@ -6,6 +6,7 @@ *) type t = { - (* The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. *) - image: string; -} [@@deriving yojson { strict = false }, show, make ];; + (* The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. *) + image : string; +} +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/create_moderation_request.ml b/src/models/create_moderation_request.ml index 15c63dc..499751d 100644 --- a/src/models/create_moderation_request.ml +++ b/src/models/create_moderation_request.ml @@ -6,8 +6,9 @@ *) type t = { - (* The input text to classify *) - input: string array; - (* Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. *) - model: string option [@default None]; -} [@@deriving yojson { strict = false }, show, make ];; + (* The input text to classify *) + input : string array; + (* Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. 
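A moderation sketch: only `input` is required, and the model can be pinned to one of the two names above via record update (the sample sentence is invented).

  let moderation_req =
    let base =
      Openai.Create_moderation_request.make
        ~input:[| "I want to watch a scary movie tonight." |] ()
    in
    { base with model = Some "text-moderation-stable" }
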
The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. *) + model : string option; [@default None] +} +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/create_moderation_response.ml b/src/models/create_moderation_response.ml index 701fccc..0d92b4d 100644 --- a/src/models/create_moderation_response.ml +++ b/src/models/create_moderation_response.ml @@ -6,7 +6,8 @@ *) type t = { - id: string; - model: string; - results: Create_moderation_response_results.t list; -} [@@deriving yojson { strict = false }, show, make ];; + id : string; + model : string; + results : Create_moderation_response_results.t list; +} +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/create_moderation_response_categories.ml b/src/models/create_moderation_response_categories.ml index 9b75212..f50610d 100644 --- a/src/models/create_moderation_response_categories.ml +++ b/src/models/create_moderation_response_categories.ml @@ -6,11 +6,12 @@ *) type t = { - hate: bool; - hate_threatening: bool; - self_harm: bool; - sexual: bool; - sexual_minors: bool; - violence: bool; - violence_graphic: bool; -} [@@deriving yojson { strict = false }, show, make ];; + hate : bool; + hate_threatening : bool; + self_harm : bool; + sexual : bool; + sexual_minors : bool; + violence : bool; + violence_graphic : bool; +} +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/create_moderation_response_category_scores.ml b/src/models/create_moderation_response_category_scores.ml index 1c86b63..696d154 100644 --- a/src/models/create_moderation_response_category_scores.ml +++ b/src/models/create_moderation_response_category_scores.ml @@ -6,11 +6,12 @@ *) type t = { - hate: float; - hate_threatening: float; - self_harm: float; - sexual: float; - sexual_minors: float; - violence: float; - violence_graphic: float; -} [@@deriving yojson { strict = false }, show, make ];; + hate : float; + hate_threatening : float; + self_harm : float; + sexual : float; + sexual_minors : float; + violence : float; + violence_graphic : float; +} +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/create_moderation_response_results.ml b/src/models/create_moderation_response_results.ml index 2ebccf2..92e4003 100644 --- a/src/models/create_moderation_response_results.ml +++ b/src/models/create_moderation_response_results.ml @@ -6,7 +6,8 @@ *) type t = { - flagged: bool; - categories: Create_moderation_response_categories.t; - category_scores: Create_moderation_response_category_scores.t; -} [@@deriving yojson { strict = false }, show, make ];; + flagged : bool; + categories : Create_moderation_response_categories.t; + category_scores : Create_moderation_response_category_scores.t; +} +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/create_search_request.ml b/src/models/create_search_request.ml index 21add48..3add7ab 100644 --- a/src/models/create_search_request.ml +++ b/src/models/create_search_request.ml @@ -6,14 +6,15 @@ *) type t = { - (* Query to search against the documents. *) - query: string; - (* Up to 200 documents to search over, provided as a list of strings. The maximum document length (in tokens) is 2034 minus the number of tokens in the query. 
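A search sketch over inline documents, using toy strings; per the note that follows, `documents` and `file` are mutually exclusive, so only `documents` is set here.

  let search_req =
    Openai.Create_search_request.make ~query:"sunny weather"
      ~documents:
        [ "It is a beautiful sunny day."; "The storm knocked out power." ]
      ()
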
You should specify either `documents` or a `file`, but not both. *) - documents: string list; - (* The ID of an uploaded file that contains documents to search over. You should specify either `documents` or a `file`, but not both. *) - file: string option [@default None]; - (* The maximum number of documents to be re-ranked and returned by search. This flag only takes effect when `file` is set. *) - max_rerank: int32 option [@default None]; - (* A special boolean flag for showing metadata. If set to `true`, each document entry in the returned JSON will contain a \''metadata\'' field. This flag only takes effect when `file` is set. *) - return_metadata: bool option [@default None]; -} [@@deriving yojson { strict = false }, show, make ];; + (* Query to search against the documents. *) + query : string; + (* Up to 200 documents to search over, provided as a list of strings. The maximum document length (in tokens) is 2034 minus the number of tokens in the query. You should specify either `documents` or a `file`, but not both. *) + documents : string list; + (* The ID of an uploaded file that contains documents to search over. You should specify either `documents` or a `file`, but not both. *) + file : string option; [@default None] + (* The maximum number of documents to be re-ranked and returned by search. This flag only takes effect when `file` is set. *) + max_rerank : int32 option; [@default None] + (* A special boolean flag for showing metadata. If set to `true`, each document entry in the returned JSON will contain a \''metadata\'' field. This flag only takes effect when `file` is set. *) + return_metadata : bool option; [@default None] +} +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/create_search_response.ml b/src/models/create_search_response.ml index d4e9f6b..68caf16 100644 --- a/src/models/create_search_response.ml +++ b/src/models/create_search_response.ml @@ -6,7 +6,8 @@ *) type t = { - _object: string option [@default None] [@key "object"]; - model: string option [@default None]; - data: Create_search_response_data.t list; -} [@@deriving yojson { strict = false }, show, make ];; + _object : string option; [@default None] [@key "object"] + model : string option; [@default None] + data : Create_search_response_data.t list; +} +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/create_search_response_data.ml b/src/models/create_search_response_data.ml index 5022e0d..a68ba4a 100644 --- a/src/models/create_search_response_data.ml +++ b/src/models/create_search_response_data.ml @@ -6,7 +6,8 @@ *) type t = { - _object: string option [@default None] [@key "object"]; - document: int32 option [@default None]; - score: float option [@default None]; -} [@@deriving yojson { strict = false }, show, make ];; + _object : string option; [@default None] [@key "object"] + document : int32 option; [@default None] + score : float option; [@default None] +} +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/create_transcription_request.ml b/src/models/create_transcription_request.ml index 172f7e0..b3fabd6 100644 --- a/src/models/create_transcription_request.ml +++ b/src/models/create_transcription_request.ml @@ -6,16 +6,17 @@ *) type t = { - (* The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm. *) - file: string; - (* ID of the model to use. Only `whisper-1` is currently available. 
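And a transcription sketch: the audio path is a placeholder, `whisper-1` comes from the comment above, and the optional fields are set by record update.

  let transcription_req =
    let base =
      Openai.Create_transcription_request.make ~file:"meeting.mp3"
        ~model:"whisper-1" ()
    in
    { base with language = Some "en"; response_format = Some "text" }
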
*) - model: string; - (* An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. *) - prompt: string option [@default None]; - (* The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. *) - response_format: string option [@default None]; - (* The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. *) - temperature: float option [@default None]; - (* The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency. *) - language: string option [@default None]; -} [@@deriving yojson { strict = false }, show, make ];; + (* The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm. *) + file : string; + (* ID of the model to use. Only `whisper-1` is currently available. *) + model : string; + (* An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. *) + prompt : string option; [@default None] + (* The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. *) + response_format : string option; [@default None] + (* The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. *) + temperature : float option; [@default None] + (* The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency. *) + language : string option; [@default None] +} +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/create_transcription_response.ml b/src/models/create_transcription_response.ml index 19086f4..d46ee79 100644 --- a/src/models/create_transcription_response.ml +++ b/src/models/create_transcription_response.ml @@ -5,6 +5,4 @@ * *) -type t = { - text: string; -} [@@deriving yojson { strict = false }, show, make ];; +type t = { text : string } [@@deriving yojson { strict = false }, show, make] diff --git a/src/models/create_translation_request.ml b/src/models/create_translation_request.ml index cb931ed..eeefe02 100644 --- a/src/models/create_translation_request.ml +++ b/src/models/create_translation_request.ml @@ -6,14 +6,15 @@ *) type t = { - (* The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm. *) - file: string; - (* ID of the model to use. Only `whisper-1` is currently available. *) - model: string; - (* An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English. *) - prompt: string option [@default None]; - (* The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. 
*) - response_format: string option [@default None]; - (* The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. *) - temperature: float option [@default None]; -} [@@deriving yojson { strict = false }, show, make ];; + (* The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm. *) + file : string; + (* ID of the model to use. Only `whisper-1` is currently available. *) + model : string; + (* An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English. *) + prompt : string option; [@default None] + (* The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. *) + response_format : string option; [@default None] + (* The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. *) + temperature : float option; [@default None] +} +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/create_translation_response.ml b/src/models/create_translation_response.ml index 19086f4..d46ee79 100644 --- a/src/models/create_translation_response.ml +++ b/src/models/create_translation_response.ml @@ -5,6 +5,4 @@ * *) -type t = { - text: string; -} [@@deriving yojson { strict = false }, show, make ];; +type t = { text : string } [@@deriving yojson { strict = false }, show, make] diff --git a/src/models/delete_file_response.ml b/src/models/delete_file_response.ml index f9d7098..b3065d4 100644 --- a/src/models/delete_file_response.ml +++ b/src/models/delete_file_response.ml @@ -5,8 +5,5 @@ * *) -type t = { - id: string; - _object: string [@key "object"]; - deleted: bool; -} [@@deriving yojson { strict = false }, show, make ];; +type t = { id : string; _object : string; [@key "object"] deleted : bool } +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/delete_model_response.ml b/src/models/delete_model_response.ml index f9d7098..b3065d4 100644 --- a/src/models/delete_model_response.ml +++ b/src/models/delete_model_response.ml @@ -5,8 +5,5 @@ * *) -type t = { - id: string; - _object: string [@key "object"]; - deleted: bool; -} [@@deriving yojson { strict = false }, show, make ];; +type t = { id : string; _object : string; [@key "object"] deleted : bool } +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/engine.ml b/src/models/engine.ml index ef75d32..1d67ad2 100644 --- a/src/models/engine.ml +++ b/src/models/engine.ml @@ -6,8 +6,9 @@ *) type t = { - id: string; - _object: string [@key "object"]; - created: int32 option; - ready: bool; -} [@@deriving yojson { strict = false }, show, make ];; + id : string; + _object : string; [@key "object"] + created : int32 option; + ready : bool; +} +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/fine_tune.ml b/src/models/fine_tune.ml index 1bbcb7f..b6bc255 100644 --- a/src/models/fine_tune.ml +++ b/src/models/fine_tune.ml @@ -6,17 +6,18 @@ 
*) type t = { - id: string; - _object: string [@key "object"]; - created_at: int32; - updated_at: int32; - model: string; - fine_tuned_model: string option; - organization_id: string; - status: string; - hyperparams: Yojson.Safe.t; - training_files: Open_ai_file.t list; - validation_files: Open_ai_file.t list; - result_files: Open_ai_file.t list; - events: Fine_tune_event.t list; -} [@@deriving yojson { strict = false }, show, make ];; + id : string; + _object : string; [@key "object"] + created_at : int32; + updated_at : int32; + model : string; + fine_tuned_model : string option; + organization_id : string; + status : string; + hyperparams : Yojson.Safe.t; + training_files : Open_ai_file.t list; + validation_files : Open_ai_file.t list; + result_files : Open_ai_file.t list; + events : Fine_tune_event.t list; +} +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/fine_tune_event.ml b/src/models/fine_tune_event.ml index dfce1cd..b11f2b6 100644 --- a/src/models/fine_tune_event.ml +++ b/src/models/fine_tune_event.ml @@ -6,8 +6,9 @@ *) type t = { - _object: string [@key "object"]; - created_at: int32; - level: string; - message: string; -} [@@deriving yojson { strict = false }, show, make ];; + _object : string; [@key "object"] + created_at : int32; + level : string; + message : string; +} +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/images_response.ml b/src/models/images_response.ml index 8a047d9..25c3513 100644 --- a/src/models/images_response.ml +++ b/src/models/images_response.ml @@ -5,7 +5,5 @@ * *) -type t = { - created: int32; - data: Images_response_data.t list; -} [@@deriving yojson { strict = false }, show, make ];; +type t = { created : int32; data : Images_response_data.t list } +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/images_response_data.ml b/src/models/images_response_data.ml index e7342df..3181881 100644 --- a/src/models/images_response_data.ml +++ b/src/models/images_response_data.ml @@ -6,6 +6,7 @@ *) type t = { - url: string option [@default None]; - b64_json: string option [@default None]; -} [@@deriving yojson { strict = false }, show, make ];; + url : string option; [@default None] + b64_json : string option; [@default None] +} +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/list_engines_response.ml b/src/models/list_engines_response.ml index 46ebaf7..dc6b8a9 100644 --- a/src/models/list_engines_response.ml +++ b/src/models/list_engines_response.ml @@ -5,7 +5,5 @@ * *) -type t = { - _object: string [@key "object"]; - data: Engine.t list; -} [@@deriving yojson { strict = false }, show, make ];; +type t = { _object : string; [@key "object"] data : Engine.t list } +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/list_files_response.ml b/src/models/list_files_response.ml index d170b7c..e5f3e5f 100644 --- a/src/models/list_files_response.ml +++ b/src/models/list_files_response.ml @@ -5,7 +5,5 @@ * *) -type t = { - _object: string [@key "object"]; - data: Open_ai_file.t list; -} [@@deriving yojson { strict = false }, show, make ];; +type t = { _object : string; [@key "object"] data : Open_ai_file.t list } +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/list_fine_tune_events_response.ml b/src/models/list_fine_tune_events_response.ml index 543d2ee..8b10235 100644 --- a/src/models/list_fine_tune_events_response.ml +++ b/src/models/list_fine_tune_events_response.ml @@ -5,7 +5,5 @@ * *) -type t = { - _object: 
string [@key "object"]; - data: Fine_tune_event.t list; -} [@@deriving yojson { strict = false }, show, make ];; +type t = { _object : string; [@key "object"] data : Fine_tune_event.t list } +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/list_fine_tunes_response.ml b/src/models/list_fine_tunes_response.ml index 4a85a23..eb66f19 100644 --- a/src/models/list_fine_tunes_response.ml +++ b/src/models/list_fine_tunes_response.ml @@ -5,7 +5,5 @@ * *) -type t = { - _object: string [@key "object"]; - data: Fine_tune.t list; -} [@@deriving yojson { strict = false }, show, make ];; +type t = { _object : string; [@key "object"] data : Fine_tune.t list } +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/list_models_response.ml b/src/models/list_models_response.ml index 1d9bded..b6b7edf 100644 --- a/src/models/list_models_response.ml +++ b/src/models/list_models_response.ml @@ -5,7 +5,5 @@ * *) -type t = { - _object: string [@key "object"]; - data: Model.t list; -} [@@deriving yojson { strict = false }, show, make ];; +type t = { _object : string; [@key "object"] data : Model.t list } +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/model.ml b/src/models/model.ml index c9ad4ee..92be9c1 100644 --- a/src/models/model.ml +++ b/src/models/model.ml @@ -6,8 +6,9 @@ *) type t = { - id: string; - _object: string [@key "object"]; - created: int32; - owned_by: string; -} [@@deriving yojson { strict = false }, show, make ];; + id : string; + _object : string; [@key "object"] + created : int32; + owned_by : string; +} +[@@deriving yojson { strict = false }, show, make] diff --git a/src/models/open_ai_file.ml b/src/models/open_ai_file.ml index 805a020..656ef25 100644 --- a/src/models/open_ai_file.ml +++ b/src/models/open_ai_file.ml @@ -6,12 +6,13 @@ *) type t = { - id: string; - _object: string [@key "object"]; - bytes: int32; - created_at: int32; - filename: string; - purpose: string; - status: string option [@default None]; - status_details: Yojson.Safe.t option [@default None]; -} [@@deriving yojson { strict = false }, show, make ];; + id : string; + _object : string; [@key "object"] + bytes : int32; + created_at : int32; + filename : string; + purpose : string; + status : string option; [@default None] + status_details : Yojson.Safe.t option; [@default None] +} +[@@deriving yojson { strict = false }, show, make] diff --git a/src/support/enums.ml b/src/support/enums.ml index 114d437..b1a4755 100644 --- a/src/support/enums.ml +++ b/src/support/enums.ml @@ -5,37 +5,48 @@ * *) -type size = [ -| `_256x256 [@printer fun fmt _ -> Format.pp_print_string fmt "256x256"] [@name "256x256"] -| `_512x512 [@printer fun fmt _ -> Format.pp_print_string fmt "512x512"] [@name "512x512"] -| `_1024x1024 [@printer fun fmt _ -> Format.pp_print_string fmt "1024x1024"] [@name "1024x1024"] -] [@@deriving yojson, show { with_path = false }];; +type size = + [ `_256x256 + [@printer fun fmt _ -> Format.pp_print_string fmt "256x256"] + [@name "256x256"] + | `_512x512 + [@printer fun fmt _ -> Format.pp_print_string fmt "512x512"] + [@name "512x512"] + | `_1024x1024 + [@printer fun fmt _ -> Format.pp_print_string fmt "1024x1024"] + [@name "1024x1024"] ] +[@@deriving yojson, show { with_path = false }] + +let size_of_yojson json = size_of_yojson (`List [ json ]) -let size_of_yojson json = size_of_yojson (`List [json]) let size_to_yojson e = - match size_to_yojson e with - | `List [json] -> json - | json -> json + match size_to_yojson e with `List [ json ] -> json 
| json -> json + +type role = + [ `System + [@printer fun fmt _ -> Format.pp_print_string fmt "system"] + [@name "system"] + | `User + [@printer fun fmt _ -> Format.pp_print_string fmt "user"] + [@name "user"] + | `Assistant + [@printer fun fmt _ -> Format.pp_print_string fmt "assistant"] + [@name "assistant"] ] +[@@deriving yojson, show { with_path = false }] -type role = [ -| `System [@printer fun fmt _ -> Format.pp_print_string fmt "system"] [@name "system"] -| `User [@printer fun fmt _ -> Format.pp_print_string fmt "user"] [@name "user"] -| `Assistant [@printer fun fmt _ -> Format.pp_print_string fmt "assistant"] [@name "assistant"] -] [@@deriving yojson, show { with_path = false }];; +let role_of_yojson json = role_of_yojson (`List [ json ]) -let role_of_yojson json = role_of_yojson (`List [json]) let role_to_yojson e = - match role_to_yojson e with - | `List [json] -> json - | json -> json + match role_to_yojson e with `List [ json ] -> json | json -> json + +type response_format = + [ `Url [@printer fun fmt _ -> Format.pp_print_string fmt "url"] [@name "url"] + | `B64_json + [@printer fun fmt _ -> Format.pp_print_string fmt "b64_json"] + [@name "b64_json"] ] +[@@deriving yojson, show { with_path = false }] -type response_format = [ -| `Url [@printer fun fmt _ -> Format.pp_print_string fmt "url"] [@name "url"] -| `B64_json [@printer fun fmt _ -> Format.pp_print_string fmt "b64_json"] [@name "b64_json"] -] [@@deriving yojson, show { with_path = false }];; +let response_format_of_yojson json = response_format_of_yojson (`List [ json ]) -let response_format_of_yojson json = response_format_of_yojson (`List [json]) let response_format_to_yojson e = - match response_format_to_yojson e with - | `List [json] -> json - | json -> json + match response_format_to_yojson e with `List [ json ] -> json | json -> json diff --git a/src/support/jsonSupport.ml b/src/support/jsonSupport.ml index 4b0fac7..0932c69 100644 --- a/src/support/jsonSupport.ml +++ b/src/support/jsonSupport.ml @@ -1,55 +1,42 @@ open Ppx_deriving_yojson_runtime let unwrap to_json json = - match to_json json with - | Result.Ok json -> json - | Result.Error s -> failwith s + match to_json json with + | Result.Ok json -> json + | Result.Error s -> failwith s let to_int json = - match json with - | `Int x -> x - | `Intlit s -> int_of_string s - | _ -> failwith "JsonSupport.to_int" + match json with + | `Int x -> x + | `Intlit s -> int_of_string s + | _ -> failwith "JsonSupport.to_int" let to_bool json = - match json with - | `Bool x -> x - | _ -> failwith "JsonSupport.to_bool" + match json with `Bool x -> x | _ -> failwith "JsonSupport.to_bool" let to_float json = - match json with - | `Float x -> x - | _ -> failwith "JsonSupport.to_float" + match json with `Float x -> x | _ -> failwith "JsonSupport.to_float" let to_string json = - match json with - | `String s -> s - | _ -> failwith "JsonSupport.to_string" + match json with `String s -> s | _ -> failwith "JsonSupport.to_string" let to_int32 json : int32 = - match json with - | `Int x -> Int32.of_int x - | `Intlit s -> Int32.of_string s - | _ -> failwith "JsonSupport.to_int32" + match json with + | `Int x -> Int32.of_int x + | `Intlit s -> Int32.of_string s + | _ -> failwith "JsonSupport.to_int32" let to_int64 json : int64 = - match json with - | `Int x -> Int64.of_int x - | `Intlit s -> Int64.of_string s - | _ -> failwith "JsonSupport.to_int64" + match json with + | `Int x -> Int64.of_int x + | `Intlit s -> Int64.of_string s + | _ -> failwith "JsonSupport.to_int64" let of_int x = 
`Int x - let of_bool b = `Bool b - let of_float x = `Float x - let of_string s = `String s - let of_int32 x = `Intlit (Int32.to_string x) - let of_int64 x = `Intlit (Int64.to_string x) - let of_list_of of_f l = `List (List.map of_f l) - let of_map_of of_f l = `Assoc (List.map (fun (k, v) -> (k, of_f v)) l) \ No newline at end of file diff --git a/src/support/multiTypes.ml b/src/support/multiTypes.ml index d7b7400..2a9c08b 100644 --- a/src/support/multiTypes.ml +++ b/src/support/multiTypes.ml @@ -1,76 +1,85 @@ -let stringArray_of_yojson x: ([> `StringArray of string array], string) result = +let stringArray_of_yojson x : ([> `StringArray of string array ], string) result + = match x with | `List l -> - let rec aux = function - | [] -> Ok [] - | `String s :: tl -> - Result.map (fun l -> s :: l) (aux tl) - | _ -> Error "stringArray_of_yojson: not a string array" - in - Result.map (fun rl -> `StringArray(Array.of_list rl)) (aux l) + let rec aux = function + | [] -> Ok [] + | `String s :: tl -> Result.map (fun l -> s :: l) (aux tl) + | _ -> Error "stringArray_of_yojson: not a string array" + in + Result.map (fun rl -> `StringArray (Array.of_list rl)) (aux l) | _ -> Error "stringArray_of_yojson: not a string array" -let intArray_of_yojson x: ([> `IntArray of int array], string) result = +let intArray_of_yojson x : ([> `IntArray of int array ], string) result = match x with | `List l -> - let rec aux = function - | [] -> Ok [] - | `Int i :: tl -> - Result.map (fun l -> i :: l) (aux tl) - | _ -> Error "intArray_of_yojson: not an int array" - in - Result.map (fun rl -> `IntArray(Array.of_list rl)) (aux l) + let rec aux = function + | [] -> Ok [] + | `Int i :: tl -> Result.map (fun l -> i :: l) (aux tl) + | _ -> Error "intArray_of_yojson: not an int array" + in + Result.map (fun rl -> `IntArray (Array.of_list rl)) (aux l) | _ -> Error "intArray_of_yojson: not an int array" - (* TODO: see if there's a less awful way to organize this *) +(* TODO: see if there's a less awful way to organize this *) module StringOrStringArrayOrIntArrayOrIntArrayArray = struct type t = [ `String of string | `StringArray of string array | `IntArray of int array - | `IntArrayArray of int array array - ] - + | `IntArrayArray of int array array ] + let pp ppf = function | `String s -> Format.fprintf ppf "%s" s - | `StringArray a -> Format.fprintf ppf "%a" (Format.pp_print_list Format.pp_print_string) (Array.to_list a) - | `IntArray a -> Format.fprintf ppf "%a" (Format.pp_print_list Format.pp_print_int) (Array.to_list a) + | `StringArray a -> + Format.fprintf ppf "%a" + (Format.pp_print_list Format.pp_print_string) + (Array.to_list a) + | `IntArray a -> + Format.fprintf ppf "%a" + (Format.pp_print_list Format.pp_print_int) + (Array.to_list a) | `IntArrayArray a -> - Format.fprintf - ppf - "%a" - (Format.pp_print_list (Format.pp_print_list Format.pp_print_int)) (Array.to_list (Array.map Array.to_list a)) + Format.fprintf ppf "%a" + (Format.pp_print_list (Format.pp_print_list Format.pp_print_int)) + (Array.to_list (Array.map Array.to_list a)) - let to_yojson (t: t): Yojson.Safe.t = + let to_yojson (t : t) : Yojson.Safe.t = match t with | `String s -> `String s | `StringArray a -> `List (List.map (fun s -> `String s) (Array.to_list a)) | `IntArray a -> `List (List.map (fun i -> `Int i) (Array.to_list a)) - | `IntArrayArray a -> `List (List.map (fun a -> `List (List.map (fun i -> `Int i) (Array.to_list a))) (Array.to_list a)) - - let of_yojson (x: Yojson.Safe.t): (t, string) result = - let errorMessage = 
"StringOrStringArrayOrIntArrayOrIntArrayArray.of_yojson: not a string or string array or int array or int array array" in + | `IntArrayArray a -> + `List + (List.map + (fun a -> `List (List.map (fun i -> `Int i) (Array.to_list a))) + (Array.to_list a)) + + let of_yojson (x : Yojson.Safe.t) : (t, string) result = + let errorMessage = + "StringOrStringArrayOrIntArrayOrIntArrayArray.of_yojson: not a string or \ + string array or int array or int array array" + in match x with | `String s -> Ok (`String s) - | `List l -> - (* may still be a string array, int array, or int array array, we can tell by looking at the first element *) - begin match l with - | [] -> Ok(`StringArray [||]) - | `String _ :: _ -> stringArray_of_yojson x - | `Int _ :: _ -> intArray_of_yojson x - | `List _ :: _ -> - (* either an int array array, or invalid *) - let rec aux = function - | [] -> Ok [] - | `List l :: tl -> - let ( let* ) = Result.bind in - let* `IntArray l' = intArray_of_yojson (`List l) in - let* tl' = aux tl in - Ok (l' :: tl') - | _ -> Error errorMessage - in - Result.map (fun rl -> `IntArrayArray(Array.of_list rl)) (aux l) - | _ -> Error errorMessage - end + | `List l -> ( + (* may still be a string array, int array, or int array array, we can tell by looking at the first element *) + match l with + | [] -> Ok (`StringArray [||]) + | `String _ :: _ -> stringArray_of_yojson x + | `Int _ :: _ -> intArray_of_yojson x + | `List _ :: _ -> + (* either an int array array, or invalid *) + let rec aux = function + | [] -> Ok [] + | `List l :: tl -> + let ( let* ) = Result.bind in + let* (`IntArray l') = intArray_of_yojson (`List l) in + let* tl' = aux tl in + Ok (l' :: tl') + | _ -> Error errorMessage + in + Result.map (fun rl -> `IntArrayArray (Array.of_list rl)) (aux l) + | _ -> Error errorMessage) | _ -> Error errorMessage end \ No newline at end of file diff --git a/src/support/request.ml b/src/support/request.ml index f6a5ef8..506f1cc 100644 --- a/src/support/request.ml +++ b/src/support/request.ml @@ -1,20 +1,14 @@ let api_key = Sys.getenv "OPENAI_API_KEY" let base_url = "https://api.openai.com/v1" + let default_headers = Cohttp.Header.add (Cohttp.Header.init_with "Content-Type" "application/json") - "Authorization" - ("Bearer " ^ api_key) - -let option_fold f default o = - match o with - | Some v -> f v - | None -> default + "Authorization" ("Bearer " ^ api_key) +let option_fold f default o = match o with Some v -> f v | None -> default let build_uri operation_path = Uri.of_string (base_url ^ operation_path) - -let add_string_header headers key value = - Cohttp.Header.add headers key value +let add_string_header headers key value = Cohttp.Header.add headers key value let add_string_header_multi headers key values = Cohttp.Header.add_multi headers key values @@ -41,13 +35,16 @@ let write_as_json_body to_json payload = write_json_body (to_json payload) let handle_response resp on_success_handler = match Cohttp_lwt.Response.status resp with | #Cohttp.Code.success_status -> on_success_handler () - | s -> failwith ("Server responded with status " ^ Cohttp.Code.(reason_phrase_of_code (code_of_status s))) + | s -> + failwith + ("Server responded with status " + ^ Cohttp.Code.(reason_phrase_of_code (code_of_status s))) + +let handle_unit_response resp = handle_response resp (fun () -> Lwt.return ()) -let handle_unit_response resp = handle_response resp (fun () -> Lwt.return ()) - let read_json_body resp body = handle_response resp (fun () -> - (Lwt.(Cohttp_lwt.Body.to_string body >|= Yojson.Safe.from_string))) + 
diff --git a/src/support/request.ml b/src/support/request.ml
index f6a5ef8..506f1cc 100644
--- a/src/support/request.ml
+++ b/src/support/request.ml
@@ -1,20 +1,14 @@
 let api_key = Sys.getenv "OPENAI_API_KEY"
 let base_url = "https://api.openai.com/v1"
+
 let default_headers =
   Cohttp.Header.add
     (Cohttp.Header.init_with "Content-Type" "application/json")
-    "Authorization"
-    ("Bearer " ^ api_key)
-
-let option_fold f default o =
-  match o with
-  | Some v -> f v
-  | None -> default
+    "Authorization" ("Bearer " ^ api_key)
 
+let option_fold f default o = match o with Some v -> f v | None -> default
 let build_uri operation_path = Uri.of_string (base_url ^ operation_path)
-
-let add_string_header headers key value =
-  Cohttp.Header.add headers key value
+let add_string_header headers key value = Cohttp.Header.add headers key value
 
 let add_string_header_multi headers key values =
   Cohttp.Header.add_multi headers key values
@@ -41,13 +35,16 @@ let write_as_json_body to_json payload = write_json_body (to_json payload)
 let handle_response resp on_success_handler =
   match Cohttp_lwt.Response.status resp with
   | #Cohttp.Code.success_status -> on_success_handler ()
-  | s -> failwith ("Server responded with status " ^ Cohttp.Code.(reason_phrase_of_code (code_of_status s)))
+  | s ->
+      failwith
+        ("Server responded with status "
+        ^ Cohttp.Code.(reason_phrase_of_code (code_of_status s)))
+
+let handle_unit_response resp = handle_response resp (fun () -> Lwt.return ())
 
-let handle_unit_response resp = handle_response resp (fun () -> Lwt.return ())
-
 let read_json_body resp body =
   handle_response resp (fun () ->
-    (Lwt.(Cohttp_lwt.Body.to_string body >|= Yojson.Safe.from_string)))
+      Lwt.(Cohttp_lwt.Body.to_string body >|= Yojson.Safe.from_string))
 
 let read_json_body_as of_json resp body =
   Lwt.(read_json_body resp body >|= of_json)
@@ -62,11 +59,14 @@ let read_json_body_as_map resp body =
   Lwt.(read_json_body resp body >|= Yojson.Safe.Util.to_assoc)
 
 let read_json_body_as_map_of of_json resp body =
-  Lwt.(read_json_body_as_map resp body >|= List.map (fun (s, v) -> (s, of_json v)))
+  Lwt.(
+    read_json_body_as_map resp body >|= List.map (fun (s, v) -> (s, of_json v)))
 
 let replace_string_path_param uri param_name param_value =
   let regexp = Str.regexp (Str.quote ("{" ^ param_name ^ "}")) in
-  let path = Str.global_replace regexp param_value (Uri.pct_decode (Uri.path uri)) in
+  let path =
+    Str.global_replace regexp param_value (Uri.pct_decode (Uri.path uri))
+  in
   Uri.with_path uri path
 
 let replace_path_param uri param_name to_string param_value =
@@ -87,15 +87,21 @@ let maybe_add_query_param uri param_name to_string param_value =
 let init_form_encoded_body () = ""
 
 let add_form_encoded_body_param params param_name to_string param_value =
-  let new_param_enc = Printf.sprintf {|%s=%s|} (Uri.pct_encode param_name) (Uri.pct_encode (to_string param_value)) in
-  if params = ""
-  then new_param_enc
+  let new_param_enc =
+    Printf.sprintf {|%s=%s|}
+      (Uri.pct_encode param_name)
+      (Uri.pct_encode (to_string param_value))
+  in
+  if params = "" then new_param_enc
   else Printf.sprintf {|%s&%s|} params new_param_enc
 
 let add_form_encoded_body_param_list params param_name to_string new_params =
-  add_form_encoded_body_param params param_name (String.concat ",") (to_string new_params)
+  add_form_encoded_body_param params param_name (String.concat ",")
+    (to_string new_params)
 
 let maybe_add_form_encoded_body_param params param_name to_string param_value =
-  option_fold (add_form_encoded_body_param params param_name to_string) params param_value
+  option_fold
+    (add_form_encoded_body_param params param_name to_string)
+    params param_value
 
 let finalize_form_encoded_body body = Cohttp_lwt.Body.of_string body
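(* Editor's note: a sketch, not part of the patch, of the form-encoded body
   helpers above. Parameters accumulate in a plain percent-encoded string
   joined with '&', and maybe_add_form_encoded_body_param skips None via
   option_fold; the model name is illustrative. *)
let () =
  let body = Request.init_form_encoded_body () in
  let body =
    Request.add_form_encoded_body_param body "model" Fun.id "text-davinci-003"
  in
  let body = Request.maybe_add_form_encoded_body_param body "user" Fun.id None in
  assert (body = "model=text-davinci-003");
  (* finalize wraps the accumulated string as a Cohttp body for the request *)
  ignore (Request.finalize_form_encoded_body body)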