diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index d953b3191e..3c53b99a17 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -94,6 +94,10 @@ jobs:
         run: python -m development.docs.write_cli_docs
         working-directory: ./inference_repo
 
+      - name: Write OpenAPI spec
+        run: python -m development.docs.write_openapi_spec
+        working-directory: ./inference_repo
+
       - name: Deploy docs
         # Only deploy if release event OR if deploy input was set to true
         if: ${{ github.event_name == 'release' || github.event.inputs.deploy == 'true' }}
diff --git a/.gitignore b/.gitignore
index 820584f2e5..346782eb7e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -168,6 +168,7 @@ docs/workflows/blocks/*
 docs/workflows/kinds/*
 docs/workflows/gallery/*
 docs/inference_helpers/cli_commands/reference.md
+docs/openapi.json
 !tests/workflows/integration_tests/execution/assets/*.jpg
 !tests/workflows/integration_tests/execution/assets/rock_paper_scissors/*.jpg
 !tests/workflows/unit_tests/core_steps/models/third_party/assets/*.png
diff --git a/development/docs/write_openapi_spec.py b/development/docs/write_openapi_spec.py
new file mode 100644
index 0000000000..e51bc24c15
--- /dev/null
+++ b/development/docs/write_openapi_spec.py
@@ -0,0 +1,44 @@
+"""Generate the OpenAPI spec for the inference server and write it to docs/openapi.json."""
+
+import json
+import os
+
+from fastapi.openapi.utils import get_openapi
+
+from inference.core.interfaces.http.http_api import HttpInterface
+from inference.core.managers.base import ModelManager
+from inference.core.managers.decorators.fixed_size_cache import WithFixedSizeCache
+from inference.core.registries.roboflow import RoboflowModelRegistry
+from inference.models.utils import ROBOFLOW_MODEL_TYPES
+
+# Build a minimal HTTP interface so FastAPI can enumerate every route.
+model_registry = RoboflowModelRegistry(ROBOFLOW_MODEL_TYPES)
+model_manager = ModelManager(model_registry=model_registry)
+model_manager = WithFixedSizeCache(model_manager, max_size=1)
+model_manager.init_pingback()
+interface = HttpInterface(model_manager)
+app = interface.app
+
+DOCS_ROOT_DIR = os.path.abspath(
+    os.path.join(
+        os.path.dirname(__file__),
+        "..",
+        "..",
+        "docs",
+    )
+)
+
+filename = os.path.join(DOCS_ROOT_DIR, "openapi.json")
+
+# Serialize the same schema FastAPI would serve at /openapi.json.
+with open(filename, "w") as f:
+    json.dump(
+        get_openapi(
+            title=app.title,
+            version=app.version,
+            openapi_version=app.openapi_version,
+            description=app.description,
+            routes=app.routes,
+        ),
+        f,
+    )
diff --git a/docs/api.md b/docs/api.md
index bb325bcf8c..55f2a6331c 100644
--- a/docs/api.md
+++ b/docs/api.md
@@ -1,5 +1,5 @@
-The Roboflow Inference Server provides OpenAPI documentation at the `/docs` endpoint for use in development.
+When the Inference Server is running, it provides OpenAPI documentation at the `/docs` endpoint for use in development.
 
-Below is the OpenAPI specification for the Inference Server, rendered with Swagger.
+Below is the OpenAPI specification for the current release of the Inference Server.
 
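For quick verification, a minimal sanity check of the generated file might look like the sketch below. It is not part of this PR; it assumes `python -m development.docs.write_openapi_spec` has already been run from the repository root so that `docs/openapi.json` exists.

```python
# Hypothetical smoke test for the generated spec -- not included in this PR.
import json

with open("docs/openapi.json") as f:
    spec = json.load(f)

# The previously committed spec was OpenAPI 3.0.2; any 3.x document is expected here.
assert spec["openapi"].startswith("3."), "expected an OpenAPI 3.x document"
print(f"{spec['info']['title']} v{spec['info']['version']}: {len(spec['paths'])} paths")
```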
diff --git a/docs/openapi.json b/docs/openapi.json deleted file mode 100644 index 48dddf77b3..0000000000 --- a/docs/openapi.json +++ /dev/null @@ -1 +0,0 @@ -{"openapi":"3.0.2","info":{"title":"Roboflow Inference Server","description":"Roboflow inference server","termsOfService":"https://roboflow.com/terms","contact":{"name":"Roboflow Inc.","url":"https://roboflow.com/contact","email":"help@roboflow.com"},"license":{"name":"Apache 2.0","url":"https://www.apache.org/licenses/LICENSE-2.0.html"},"version":"0.9.2"},"paths":{"/info":{"get":{"summary":"Info","description":"Get the server name and version number","operationId":"root_info_get","responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ServerVersionInfo"}}}}}}},"/model/registry":{"get":{"summary":"Get model keys","description":"Get the ID of each loaded model","operationId":"registry_model_registry_get","responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ModelsDescriptions"}}}}}}},"/model/add":{"post":{"summary":"Load a model","description":"Load the model with the given model ID","operationId":"model_add_model_add_post","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/AddModelRequest"}}},"required":true},"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ModelsDescriptions"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}}}},"/model/remove":{"post":{"summary":"Remove a model","description":"Remove the model with the given model ID","operationId":"model_remove_model_remove_post","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/ClearModelRequest"}}},"required":true},"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ModelsDescriptions"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}}}},"/model/clear":{"post":{"summary":"Remove all models","description":"Remove all loaded models","operationId":"model_clear_model_clear_post","responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ModelsDescriptions"}}}}}}},"/infer/object_detection":{"post":{"summary":"Object detection infer","description":"Run inference with the specified object detection model","operationId":"infer_object_detection_infer_object_detection_post","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/ObjectDetectionInferenceRequest"}}},"required":true},"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"title":"Response Infer Object Detection Infer Object Detection Post","anyOf":[{"$ref":"#/components/schemas/ObjectDetectionInferenceResponse"},{"type":"array","items":{"$ref":"#/components/schemas/ObjectDetectionInferenceResponse"}}]}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}}}},"/infer/instance_segmentation":{"post":{"summary":"Instance segmentation infer","description":"Run inference with the specified instance segmentation 
model","operationId":"infer_instance_segmentation_infer_instance_segmentation_post","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/InstanceSegmentationInferenceRequest"}}},"required":true},"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/InstanceSegmentationInferenceResponse"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}}}},"/infer/classification":{"post":{"summary":"Classification infer","description":"Run inference with the specified classification model","operationId":"infer_classification_infer_classification_post","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/ClassificationInferenceRequest"}}},"required":true},"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"title":"Response Infer Classification Infer Classification Post","anyOf":[{"$ref":"#/components/schemas/ClassificationInferenceResponse"},{"$ref":"#/components/schemas/MultiLabelClassificationInferenceResponse"}]}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}}}},"/clip/embed_image":{"post":{"summary":"CLIP Image Embeddings","description":"Run the Open AI CLIP model to embed image data.","operationId":"clip_embed_image_clip_embed_image_post","parameters":[{"description":"Roboflow API Key that will be passed to the model during initialization for artifact retrieval","required":false,"schema":{"title":"Api Key","type":"string","description":"Roboflow API Key that will be passed to the model during initialization for artifact retrieval"},"name":"api_key","in":"query"}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/ClipImageEmbeddingRequest"}}},"required":true},"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ClipEmbeddingResponse"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}}}},"/clip/embed_text":{"post":{"summary":"CLIP Text Embeddings","description":"Run the Open AI CLIP model to embed text data.","operationId":"clip_embed_text_clip_embed_text_post","parameters":[{"description":"Roboflow API Key that will be passed to the model during initialization for artifact retrieval","required":false,"schema":{"title":"Api Key","type":"string","description":"Roboflow API Key that will be passed to the model during initialization for artifact retrieval"},"name":"api_key","in":"query"}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/ClipTextEmbeddingRequest"}}},"required":true},"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ClipEmbeddingResponse"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}}}},"/clip/compare":{"post":{"summary":"CLIP Compare","description":"Run the Open AI CLIP model to compute similarity scores.","operationId":"clip_compare_clip_compare_post","parameters":[{"description":"Roboflow API Key that will be passed to the model during initialization for artifact retrieval","required":false,"schema":{"title":"Api 
Key","type":"string","description":"Roboflow API Key that will be passed to the model during initialization for artifact retrieval"},"name":"api_key","in":"query"}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/ClipCompareRequest"}}},"required":true},"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ClipCompareResponse"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}}}},"/sam/embed_image":{"post":{"summary":"SAM Image Embeddings","description":"Run the Meta AI Segmant Anything Model to embed image data.","operationId":"sam_embed_image_sam_embed_image_post","parameters":[{"description":"Roboflow API Key that will be passed to the model during initialization for artifact retrieval","required":false,"schema":{"title":"Api Key","type":"string","description":"Roboflow API Key that will be passed to the model during initialization for artifact retrieval"},"name":"api_key","in":"query"}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/SamEmbeddingRequest"}}},"required":true},"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/SamEmbeddingResponse"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}}}},"/sam/segment_image":{"post":{"summary":"SAM Image Segmentation","description":"Run the Meta AI Segmant Anything Model to generate segmenations for image data.","operationId":"sam_segment_image_sam_segment_image_post","parameters":[{"description":"Roboflow API Key that will be passed to the model during initialization for artifact retrieval","required":false,"schema":{"title":"Api Key","type":"string","description":"Roboflow API Key that will be passed to the model during initialization for artifact retrieval"},"name":"api_key","in":"query"}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/SamSegmentationRequest"}}},"required":true},"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/SamSegmentationResponse"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}}}},"/gaze/gaze_detection":{"post":{"summary":"Gaze Detection","description":"Run the gaze detection model to detect gaze.","operationId":"gaze_detection_gaze_gaze_detection_post","parameters":[{"description":"Roboflow API Key that will be passed to the model during initialization for artifact retrieval","required":false,"schema":{"title":"Api Key","type":"string","description":"Roboflow API Key that will be passed to the model during initialization for artifact retrieval"},"name":"api_key","in":"query"}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/GazeDetectionInferenceRequest"}}},"required":true},"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"title":"Response Gaze Detection Gaze Gaze Detection Post","type":"array","items":{"$ref":"#/components/schemas/GazeDetectionInferenceResponse"}}}}},"422":{"description":"Validation 
Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}}}},"/{dataset_id}/{version_id}":{"post":{"summary":"Legacy Infer From Request","description":"Legacy inference endpoint for object detection, instance segmentation, and classification.\n\nArgs:\n dataset_id (str): ID of a Roboflow dataset corresponding to the model to use for inference.\n version_id (str): ID of a Roboflow dataset version corresponding to the model to use for inference.\n api_key (Optional[str], default None): Roboflow API Key passed to the model during initialization for artifact retrieval.\n # Other parameters described in the function signature...\n\nReturns:\n Union[M.InstanceSegmentationInferenceResponse, M.ObjectDetectionInferenceResponse, M.ClassificationInferenceResponse, M.MultiLabelClassificationInferenceResponse, Any]: The response containing the inference results.","operationId":"legacy_infer_from_request__dataset_id___version_id__post","parameters":[{"description":"ID of a Roboflow dataset corresponding to the model to use for inference","required":true,"schema":{"title":"Dataset Id","type":"string","description":"ID of a Roboflow dataset corresponding to the model to use for inference"},"name":"dataset_id","in":"path"},{"description":"ID of a Roboflow dataset version corresponding to the model to use for inference","required":true,"schema":{"title":"Version Id","type":"string","description":"ID of a Roboflow dataset version corresponding to the model to use for inference"},"name":"version_id","in":"path"},{"description":"Roboflow API Key that will be passed to the model during initialization for artifact retrieval","required":false,"schema":{"title":"Api Key","type":"string","description":"Roboflow API Key that will be passed to the model during initialization for artifact retrieval"},"name":"api_key","in":"query"},{"description":"The confidence threshold used to filter out predictions","required":false,"schema":{"title":"Confidence","type":"number","description":"The confidence threshold used to filter out predictions","default":0.4},"name":"confidence","in":"query"},{"description":"One of 'json' or 'image'. If 'json' prediction data is return as a JSON string. If 'image' prediction data is visualized and overlayed on the original input image.","required":false,"schema":{"title":"Format","type":"string","description":"One of 'json' or 'image'. If 'json' prediction data is return as a JSON string. If 'image' prediction data is visualized and overlayed on the original input image.","default":"json"},"name":"format","in":"query"},{"description":"The publically accessible URL of an image to use for inference.","required":false,"schema":{"title":"Image","type":"string","description":"The publically accessible URL of an image to use for inference."},"name":"image","in":"query"},{"description":"One of base64 or numpy. Note, numpy input is not supported for Roboflow Hosted Inference.","required":false,"schema":{"title":"Image Type","type":"string","description":"One of base64 or numpy. Note, numpy input is not supported for Roboflow Hosted Inference.","default":"base64"},"name":"image_type","in":"query"},{"description":"If true, labels will be include in any inference visualization.","required":false,"schema":{"title":"Labels","type":"boolean","description":"If true, labels will be include in any inference visualization.","default":false},"name":"labels","in":"query"},{"description":"One of 'accurate' or 'fast'. 
If 'accurate' the mask will be decoded using the original image size. If 'fast' the mask will be decoded using the original mask size. 'accurate' is slower but more accurate.","required":false,"schema":{"title":"Mask Decode Mode","type":"string","description":"One of 'accurate' or 'fast'. If 'accurate' the mask will be decoded using the original image size. If 'fast' the mask will be decoded using the original mask size. 'accurate' is slower but more accurate.","default":"accurate"},"name":"mask_decode_mode","in":"query"},{"description":"The amount to tradeoff between 0='fast' and 1='accurate'","required":false,"schema":{"title":"Tradeoff Factor","type":"number","description":"The amount to tradeoff between 0='fast' and 1='accurate'","default":0.0},"name":"tradeoff_factor","in":"query"},{"description":"The maximum number of detections to return. This is used to limit the number of predictions returned by the model. The model may return more predictions than this number, but only the top `max_detections` predictions will be returned.","required":false,"schema":{"title":"Max Detections","type":"integer","description":"The maximum number of detections to return. This is used to limit the number of predictions returned by the model. The model may return more predictions than this number, but only the top `max_detections` predictions will be returned.","default":300},"name":"max_detections","in":"query"},{"description":"The IoU threhsold that must be met for a box pair to be considered duplicate during NMS","required":false,"schema":{"title":"Overlap","type":"number","description":"The IoU threhsold that must be met for a box pair to be considered duplicate during NMS","default":0.3},"name":"overlap","in":"query"},{"description":"The stroke width used when visualizing predictions","required":false,"schema":{"title":"Stroke","type":"integer","description":"The stroke width used when visualizing predictions","default":1},"name":"stroke","in":"query"},{"description":"If true, disables automatic image orientation","required":false,"schema":{"title":"Disable Preproc Auto Orient","type":"boolean","description":"If true, disables automatic image orientation","default":false},"name":"disable_preproc_auto_orient","in":"query"},{"description":"If true, disables automatic contrast adjustment","required":false,"schema":{"title":"Disable Preproc Contrast","type":"boolean","description":"If true, disables automatic contrast adjustment","default":false},"name":"disable_preproc_contrast","in":"query"},{"description":"If true, disables automatic grayscale conversion","required":false,"schema":{"title":"Disable Preproc Grayscale","type":"boolean","description":"If true, disables automatic grayscale conversion","default":false},"name":"disable_preproc_grayscale","in":"query"},{"description":"If true, disables automatic static crop","required":false,"schema":{"title":"Disable Preproc Static Crop","type":"boolean","description":"If true, disables automatic static crop","default":false},"name":"disable_preproc_static_crop","in":"query"}],"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"title":"Response Legacy Infer From Request Dataset Id Version Id 
Post","anyOf":[{"$ref":"#/components/schemas/InstanceSegmentationInferenceResponse"},{"$ref":"#/components/schemas/ObjectDetectionInferenceResponse"},{"$ref":"#/components/schemas/ClassificationInferenceResponse"},{"$ref":"#/components/schemas/MultiLabelClassificationInferenceResponse"},{}]}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}}}},"/clear_cache":{"get":{"summary":"Legacy Clear Cache","description":"Clears the model cache.\n\nThis endpoint provides a way to clear the cache of loaded models.\n\nReturns:\n str: A string indicating that the cache has been cleared.","operationId":"legacy_clear_cache_clear_cache_get","responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"title":"Response Legacy Clear Cache Clear Cache Get","type":"string"}}}}}}},"/start/{dataset_id}/{version_id}":{"get":{"summary":"Model Add","description":"Starts a model inference session.\n\nThis endpoint initializes and starts an inference session for the specified model version.\n\nArgs:\n dataset_id (str): ID of a Roboflow dataset corresponding to the model.\n version_id (str): ID of a Roboflow dataset version corresponding to the model.\n api_key (str, optional): Roboflow API Key for artifact retrieval.\n\nReturns:\n JSONResponse: A response object containing the status and a success message.","operationId":"model_add_start__dataset_id___version_id__get","parameters":[{"required":true,"schema":{"title":"Dataset Id","type":"string"},"name":"dataset_id","in":"path"},{"required":true,"schema":{"title":"Version Id","type":"string"},"name":"version_id","in":"path"},{"required":false,"schema":{"title":"Api Key","type":"string"},"name":"api_key","in":"query"}],"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}}}}},"components":{"schemas":{"AddModelRequest":{"title":"AddModelRequest","required":["model_id"],"type":"object","properties":{"model_id":{"title":"Model Id","type":"string","description":"A unique model identifier","example":"raccoon-detector-1"},"model_type":{"title":"Model Type","type":"string","description":"The type of the model, usually referring to what task the model performs","example":"object-detection"},"api_key":{"title":"Api Key","type":"string","description":"Roboflow API Key that will be passed to the model during initialization for artifact retrieval"}},"description":"Request to add a model to the inference server.\n\nAttributes:\n model_id (str): A unique model identifier.\n model_type (Optional[str]): The type of the model, usually referring to what task the model performs.\n api_key (Optional[str]): Roboflow API Key that will be passed to the model during initialization for artifact retrieval."},"ClassificationInferenceRequest":{"title":"ClassificationInferenceRequest","required":["image"],"type":"object","properties":{"model_id":{"title":"Model Id","type":"string","description":"A unique model identifier","example":"raccoon-detector-1"},"model_type":{"title":"Model Type","type":"string","description":"The type of the model, usually referring to what task the model performs","example":"object-detection"},"api_key":{"title":"Api Key","type":"string","description":"Roboflow API Key that will be passed to the model during initialization for artifact 
retrieval"},"image":{"title":"Image","anyOf":[{"type":"array","items":{"$ref":"#/components/schemas/InferenceRequestImage"}},{"$ref":"#/components/schemas/InferenceRequestImage"}]},"disable_preproc_auto_orient":{"title":"Disable Preproc Auto Orient","type":"boolean","description":"If true, the auto orient preprocessing step is disabled for this call.","default":false},"disable_preproc_contrast":{"title":"Disable Preproc Contrast","type":"boolean","description":"If true, the auto contrast preprocessing step is disabled for this call.","default":false},"disable_preproc_grayscale":{"title":"Disable Preproc Grayscale","type":"boolean","description":"If true, the grayscale preprocessing step is disabled for this call.","default":false},"disable_preproc_static_crop":{"title":"Disable Preproc Static Crop","type":"boolean","description":"If true, the static crop preprocessing step is disabled for this call.","default":false},"confidence":{"title":"Confidence","type":"number","description":"The confidence threshold used to filter out predictions","default":0.0,"example":0.5},"visualization_stroke_width":{"title":"Visualization Stroke Width","type":"integer","description":"The stroke width used when visualizing predictions","default":1,"example":1},"visualize_predictions":{"title":"Visualize Predictions","type":"boolean","description":"If true, the predictions will be drawn on the original image and returned as a base64 string","default":false,"example":false}},"description":"Classification inference request.\n\nAttributes:\n confidence (Optional[float]): The confidence threshold used to filter out predictions.\n visualization_stroke_width (Optional[int]): The stroke width used when visualizing predictions.\n visualize_predictions (Optional[bool]): If true, the predictions will be drawn on the original image and returned as a base64 string."},"ClassificationInferenceResponse":{"title":"ClassificationInferenceResponse","required":["image","predictions","top","confidence"],"type":"object","properties":{"visualization":{"title":"Visualization","description":"Base64 encoded string containing prediction visualization image data"},"frame_id":{"title":"Frame Id","type":"integer","description":"The frame id of the image used in inference if the input was a video"},"time":{"title":"Time","type":"number","description":"The time in seconds it took to produce the predictions including image preprocessing"},"image":{"title":"Image","anyOf":[{"type":"array","items":{"$ref":"#/components/schemas/InferenceResponseImage"}},{"$ref":"#/components/schemas/InferenceResponseImage"}]},"predictions":{"title":"Predictions","type":"array","items":{"$ref":"#/components/schemas/ClassificationPrediction"}},"top":{"title":"Top","type":"string","description":"The top predicted class label"},"confidence":{"title":"Confidence","type":"number","description":"The confidence of the top predicted class label"}},"description":"Classification inference response.\n\nAttributes:\n predictions (List[ClassificationPrediction]): List of classification predictions.\n top (str): The top predicted class label.\n confidence (float): The confidence of the top predicted class label."},"ClassificationPrediction":{"title":"ClassificationPrediction","required":["class","class_id","confidence"],"type":"object","properties":{"class":{"title":"Class","type":"string","description":"The predicted class label"},"class_id":{"title":"Class Id","type":"integer","description":"Numeric ID associated with the class 
label"},"confidence":{"title":"Confidence","type":"number","description":"The class label confidence as a fraction between 0 and 1"}},"description":"Classification prediction.\n\nAttributes:\n class_name (str): The predicted class label.\n class_id (int): Numeric ID associated with the class label.\n confidence (float): The class label confidence as a fraction between 0 and 1."},"ClearModelRequest":{"title":"ClearModelRequest","required":["model_id"],"type":"object","properties":{"model_id":{"title":"Model Id","type":"string","description":"A unique model identifier","example":"raccoon-detector-1"}},"description":"Request to clear a model from the inference server.\n\nAttributes:\n model_id (str): A unique model identifier."},"ClipCompareRequest":{"title":"ClipCompareRequest","required":["subject","prompt"],"type":"object","properties":{"api_key":{"title":"Api Key","type":"string","description":"Roboflow API Key that will be passed to the model during initialization for artifact retrieval"},"clip_version_id":{"title":"Clip Version Id","type":"string","description":"The version ID of CLIP to be used for this request. Must be one of RN101, RN50, RN50x16, RN50x4, RN50x64, ViT-B-16, ViT-B-32, ViT-L-14-336px, and ViT-L-14.","default":"ViT-B-16","example":"ViT-B-16"},"subject":{"title":"Subject","anyOf":[{"$ref":"#/components/schemas/InferenceRequestImage"},{"type":"string"}],"description":"The type of image data provided, one of 'url' or 'base64'","example":"url"},"subject_type":{"title":"Subject Type","type":"string","description":"The type of subject, one of 'image' or 'text'","default":"image","example":"image"},"prompt":{"title":"Prompt","anyOf":[{"type":"array","items":{"$ref":"#/components/schemas/InferenceRequestImage"}},{"$ref":"#/components/schemas/InferenceRequestImage"},{"type":"string"},{"type":"array","items":{"type":"string"}},{"type":"object","additionalProperties":{"anyOf":[{"$ref":"#/components/schemas/InferenceRequestImage"},{"type":"string"}]}}]},"prompt_type":{"title":"Prompt Type","type":"string","description":"The type of prompt, one of 'image' or 'text'","default":"text","example":"text"}},"description":"Request for CLIP comparison.\n\nAttributes:\n subject (Union[InferenceRequestImage, str]): The type of image data provided, one of 'url' or 'base64'.\n subject_type (str): The type of subject, one of 'image' or 'text'.\n prompt (Union[List[InferenceRequestImage], InferenceRequestImage, str, List[str], Dict[str, Union[InferenceRequestImage, str]]]): The prompt for comparison.\n prompt_type (str): The type of prompt, one of 'image' or 'text'."},"ClipCompareResponse":{"title":"ClipCompareResponse","required":["similarity"],"type":"object","properties":{"frame_id":{"title":"Frame Id","type":"integer","description":"The frame id of the image used in inference if the input was a video"},"time":{"title":"Time","type":"number","description":"The time in seconds it took to produce the similarity scores including preprocessing"},"similarity":{"title":"Similarity","anyOf":[{"type":"array","items":{"type":"number"}},{"type":"object","additionalProperties":{"type":"number"}}]}},"description":"Response for CLIP comparison.\n\nAttributes:\n similarity (Union[List[float], Dict[str, float]]): Similarity scores.\n time (float): The time in seconds it took to produce the similarity scores including preprocessing."},"ClipEmbeddingResponse":{"title":"ClipEmbeddingResponse","required":["embeddings"],"type":"object","properties":{"frame_id":{"title":"Frame 
Id","type":"integer","description":"The frame id of the image used in inference if the input was a video"},"time":{"title":"Time","type":"number","description":"The time in seconds it took to produce the embeddings including preprocessing"},"embeddings":{"title":"Embeddings","type":"array","items":{"type":"array","items":{"type":"number"}},"description":"A list of embeddings, each embedding is a list of floats","example":"[[0.12, 0.23, 0.34, ..., 0.43]]"}},"description":"Response for CLIP embedding.\n\nAttributes:\n embeddings (List[List[float]]): A list of embeddings, each embedding is a list of floats.\n time (float): The time in seconds it took to produce the embeddings including preprocessing."},"ClipImageEmbeddingRequest":{"title":"ClipImageEmbeddingRequest","required":["image"],"type":"object","properties":{"api_key":{"title":"Api Key","type":"string","description":"Roboflow API Key that will be passed to the model during initialization for artifact retrieval"},"clip_version_id":{"title":"Clip Version Id","type":"string","description":"The version ID of CLIP to be used for this request. Must be one of RN101, RN50, RN50x16, RN50x4, RN50x64, ViT-B-16, ViT-B-32, ViT-L-14-336px, and ViT-L-14.","default":"ViT-B-16","example":"ViT-B-16"},"image":{"title":"Image","anyOf":[{"type":"array","items":{"$ref":"#/components/schemas/InferenceRequestImage"}},{"$ref":"#/components/schemas/InferenceRequestImage"}]}},"description":"Request for CLIP image embedding.\n\nAttributes:\n image (Union[List[InferenceRequestImage], InferenceRequestImage]): Image(s) to be embedded."},"ClipTextEmbeddingRequest":{"title":"ClipTextEmbeddingRequest","required":["text"],"type":"object","properties":{"api_key":{"title":"Api Key","type":"string","description":"Roboflow API Key that will be passed to the model during initialization for artifact retrieval"},"clip_version_id":{"title":"Clip Version Id","type":"string","description":"The version ID of CLIP to be used for this request. 
Must be one of RN101, RN50, RN50x16, RN50x4, RN50x64, ViT-B-16, ViT-B-32, ViT-L-14-336px, and ViT-L-14.","default":"ViT-B-16","example":"ViT-B-16"},"text":{"title":"Text","anyOf":[{"type":"array","items":{"type":"string"}},{"type":"string"}],"description":"A string or list of strings","example":"The quick brown fox jumps over the lazy dog"}},"description":"Request for CLIP text embedding.\n\nAttributes:\n text (Union[List[str], str]): A string or list of strings."},"FaceDetectionPrediction":{"title":"FaceDetectionPrediction","required":["x","y","width","height","confidence","landmarks"],"type":"object","properties":{"x":{"title":"X","type":"number","description":"The center x-axis pixel coordinate of the prediction"},"y":{"title":"Y","type":"number","description":"The center y-axis pixel coordinate of the prediction"},"width":{"title":"Width","type":"number","description":"The width of the prediction bounding box in number of pixels"},"height":{"title":"Height","type":"number","description":"The height of the prediction bounding box in number of pixels"},"confidence":{"title":"Confidence","type":"number","description":"The detection confidence as a fraction between 0 and 1"},"class":{"title":"Class","type":"string","description":"The predicted class label","default":"face"},"class_confidence":{"title":"Class Confidence","type":"number","description":"The class label confidence as a fraction between 0 and 1"},"class_id":{"title":"Class Id","type":"integer","description":"The class id of the prediction","default":0},"tracker_id":{"title":"Tracker Id","type":"integer","description":"The tracker id of the prediction if tracking is enabled"},"landmarks":{"title":"Landmarks","anyOf":[{"type":"array","items":{"$ref":"#/components/schemas/Point"}},{"type":"array","items":{"$ref":"#/components/schemas/Point3D"}}]}},"description":"Face Detection prediction.\n\nAttributes:\n class_name (str): fixed value \"face\".\n landmarks (Union[List[Point], List[Point3D]]): The detected face landmarks."},"GazeDetectionInferenceRequest":{"title":"GazeDetectionInferenceRequest","required":["image"],"type":"object","properties":{"api_key":{"title":"Api Key","type":"string","description":"Roboflow API Key that will be passed to the model during initialization for artifact retrieval"},"gaze_version_id":{"title":"Gaze Version Id","type":"string","description":"The version ID of Gaze to be used for this request. 
Must be one of l2cs.","default":"L2CS","example":"l2cs"},"do_run_face_detection":{"title":"Do Run Face Detection","type":"boolean","description":"If true, face detection will be applied; if false, face detection will be ignored and the whole input image will be used for gaze detection","default":true,"example":false},"image":{"title":"Image","anyOf":[{"type":"array","items":{"$ref":"#/components/schemas/InferenceRequestImage"}},{"$ref":"#/components/schemas/InferenceRequestImage"}]}},"description":"Request for gaze detection inference.\n\nAttributes:\n api_key (Optional[str]): Roboflow API Key.\n gaze_version_id (Optional[str]): The version ID of Gaze to be used for this request.\n do_run_face_detection (Optional[bool]): If true, face detection will be applied; if false, face detection will be ignored and the whole input image will be used for gaze detection.\n image (Union[List[InferenceRequestImage], InferenceRequestImage]): Image(s) for inference."},"GazeDetectionInferenceResponse":{"title":"GazeDetectionInferenceResponse","required":["predictions","time"],"type":"object","properties":{"predictions":{"title":"Predictions","type":"array","items":{"$ref":"#/components/schemas/GazeDetectionPrediction"}},"time":{"title":"Time","type":"number","description":"The processing time (second)"},"time_face_det":{"title":"Time Face Det","type":"number","description":"The face detection time (second)"},"time_gaze_det":{"title":"Time Gaze Det","type":"number","description":"The gaze detection time (second)"}},"description":"Response for gaze detection inference.\n\nAttributes:\n predictions (List[GazeDetectionPrediction]): List of gaze detection predictions.\n time (float): The processing time (second)."},"GazeDetectionPrediction":{"title":"GazeDetectionPrediction","required":["face","yaw","pitch"],"type":"object","properties":{"face":{"$ref":"#/components/schemas/FaceDetectionPrediction"},"yaw":{"title":"Yaw","type":"number","description":"Yaw (radian) of the detected face"},"pitch":{"title":"Pitch","type":"number","description":"Pitch (radian) of the detected face"}},"description":"Gaze Detection prediction.\n\nAttributes:\n face (FaceDetectionPrediction): The face prediction.\n yaw (float): Yaw (radian) of the detected face.\n pitch (float): Pitch (radian) of the detected face."},"HTTPValidationError":{"title":"HTTPValidationError","type":"object","properties":{"detail":{"title":"Detail","type":"array","items":{"$ref":"#/components/schemas/ValidationError"}}}},"InferenceRequestImage":{"title":"InferenceRequestImage","required":["type"],"type":"object","properties":{"type":{"title":"Type","type":"string","description":"The type of image data provided, one of 'url', 'base64', or 'numpy'","example":"url"},"value":{"title":"Value","description":"Image data corresponding to the image type, if type = 'url' then value is a string containing the url of an image, else if type = 'base64' then value is a string containing base64 encoded image data, else if type = 'numpy' then value is binary numpy data serialized using pickle.dumps(); array should 3 dimensions, channels last, with values in the range [0,255].","example":"http://www.example-image-url.com"}},"description":"Image data for inference request.\n\nAttributes:\n type (str): The type of image data provided, one of 'url', 'base64', or 'numpy'.\n value (Optional[Any]): Image data corresponding to the image 
type."},"InferenceResponseImage":{"title":"InferenceResponseImage","required":["width","height"],"type":"object","properties":{"width":{"title":"Width","type":"integer","description":"The original width of the image used in inference"},"height":{"title":"Height","type":"integer","description":"The original height of the image used in inference"}},"description":"Inference response image information.\n\nAttributes:\n width (int): The original width of the image used in inference.\n height (int): The original height of the image used in inference."},"InstanceSegmentationInferenceRequest":{"title":"InstanceSegmentationInferenceRequest","required":["image"],"type":"object","properties":{"model_id":{"title":"Model Id","type":"string","description":"A unique model identifier","example":"raccoon-detector-1"},"model_type":{"title":"Model Type","type":"string","description":"The type of the model, usually referring to what task the model performs","example":"object-detection"},"api_key":{"title":"Api Key","type":"string","description":"Roboflow API Key that will be passed to the model during initialization for artifact retrieval"},"image":{"title":"Image","anyOf":[{"type":"array","items":{"$ref":"#/components/schemas/InferenceRequestImage"}},{"$ref":"#/components/schemas/InferenceRequestImage"}]},"disable_preproc_auto_orient":{"title":"Disable Preproc Auto Orient","type":"boolean","description":"If true, the auto orient preprocessing step is disabled for this call.","default":false},"disable_preproc_contrast":{"title":"Disable Preproc Contrast","type":"boolean","description":"If true, the auto contrast preprocessing step is disabled for this call.","default":false},"disable_preproc_grayscale":{"title":"Disable Preproc Grayscale","type":"boolean","description":"If true, the grayscale preprocessing step is disabled for this call.","default":false},"disable_preproc_static_crop":{"title":"Disable Preproc Static Crop","type":"boolean","description":"If true, the static crop preprocessing step is disabled for this call.","default":false},"class_agnostic_nms":{"title":"Class Agnostic Nms","type":"boolean","description":"If true, NMS is applied to all detections at once, if false, NMS is applied per class","default":false,"example":false},"class_filter":{"title":"Class Filter","type":"array","items":{"type":"string"},"description":"If provided, only predictions for the listed classes will be returned","example":["class-1","class-2","class-n"]},"confidence":{"title":"Confidence","type":"number","description":"The confidence threshold used to filter out predictions","default":0.0,"example":0.5},"fix_batch_size":{"title":"Fix Batch Size","type":"boolean","description":"If true, the batch size will be fixed to the maximum batch size configured for this server","default":false,"example":false},"iou_threshold":{"title":"Iou Threshold","type":"number","description":"The IoU threhsold that must be met for a box pair to be considered duplicate during NMS","default":1.0,"example":0.5},"max_detections":{"title":"Max Detections","type":"integer","description":"The maximum number of detections that will be returned","default":300,"example":300},"max_candidates":{"title":"Max Candidates","type":"integer","description":"The maximum number of candidate detections passed to NMS","default":3000},"visualization_labels":{"title":"Visualization Labels","type":"boolean","description":"If true, labels will be rendered on prediction 
visualizations","default":false,"example":false},"visualization_stroke_width":{"title":"Visualization Stroke Width","type":"integer","description":"The stroke width used when visualizing predictions","default":1,"example":1},"visualize_predictions":{"title":"Visualize Predictions","type":"boolean","description":"If true, the predictions will be drawn on the original image and returned as a base64 string","default":false,"example":false},"mask_decode_mode":{"title":"Mask Decode Mode","type":"string","description":"The mode used to decode instance segmentation masks, one of 'accurate', 'fast', 'tradeoff'","default":"accurate","example":"accurate"},"tradeoff_factor":{"title":"Tradeoff Factor","type":"number","description":"The amount to tradeoff between 0='fast' and 1='accurate'","default":0.0,"example":0.5}},"description":"Instance Segmentation inference request.\n\nAttributes:\n mask_decode_mode (Optional[str]): The mode used to decode instance segmentation masks, one of 'accurate', 'fast', 'tradeoff'.\n tradeoff_factor (Optional[float]): The amount to tradeoff between 0='fast' and 1='accurate'."},"InstanceSegmentationInferenceResponse":{"title":"InstanceSegmentationInferenceResponse","required":["image","predictions"],"type":"object","properties":{"visualization":{"title":"Visualization","description":"Base64 encoded string containing prediction visualization image data"},"frame_id":{"title":"Frame Id","type":"integer","description":"The frame id of the image used in inference if the input was a video"},"time":{"title":"Time","type":"number","description":"The time in seconds it took to produce the predictions including image preprocessing"},"image":{"title":"Image","anyOf":[{"type":"array","items":{"$ref":"#/components/schemas/InferenceResponseImage"}},{"$ref":"#/components/schemas/InferenceResponseImage"}]},"predictions":{"title":"Predictions","type":"array","items":{"$ref":"#/components/schemas/InstanceSegmentationPrediction"}}},"description":"Instance Segmentation inference response.\n\nAttributes:\n predictions (List[InstanceSegmentationPrediction]): List of instance segmentation predictions."},"InstanceSegmentationPrediction":{"title":"InstanceSegmentationPrediction","required":["x","y","width","height","confidence","class","points","class_id"],"type":"object","properties":{"x":{"title":"X","type":"number","description":"The center x-axis pixel coordinate of the prediction"},"y":{"title":"Y","type":"number","description":"The center y-axis pixel coordinate of the prediction"},"width":{"title":"Width","type":"number","description":"The width of the prediction bounding box in number of pixels"},"height":{"title":"Height","type":"number","description":"The height of the prediction bounding box in number of pixels"},"confidence":{"title":"Confidence","type":"number","description":"The detection confidence as a fraction between 0 and 1"},"class":{"title":"Class","type":"string","description":"The predicted class label"},"class_confidence":{"title":"Class Confidence","type":"number","description":"The class label confidence as a fraction between 0 and 1"},"points":{"title":"Points","type":"array","items":{"$ref":"#/components/schemas/Point"},"description":"The list of points that make up the instance polygon"},"class_id":{"title":"Class Id","type":"integer","description":"The class id of the prediction"}},"description":"Instance Segmentation prediction.\n\nAttributes:\n x (float): The center x-axis pixel coordinate of the prediction.\n y (float): The center y-axis pixel coordinate of the 
prediction.\n width (float): The width of the prediction bounding box in number of pixels.\n height (float): The height of the prediction bounding box in number of pixels.\n confidence (float): The detection confidence as a fraction between 0 and 1.\n class_name (str): The predicted class label.\n class_confidence (Union[float, None]): The class label confidence as a fraction between 0 and 1.\n points (List[Point]): The list of points that make up the instance polygon.\n class_id: int = Field(description=\"The class id of the prediction\")"},"ModelDescriptionEntity":{"title":"ModelDescriptionEntity","required":["model_id","task_type"],"type":"object","properties":{"model_id":{"title":"Model Id","type":"string","description":"Identifier of the model","example":"some-project/3"},"task_type":{"title":"Task Type","type":"string","description":"Type of the task that the model performs","example":"classification"},"batch_size":{"title":"Batch Size","anyOf":[{"type":"integer"},{"type":"string"}],"description":"Batch size accepted by the model (if registered)."},"input_height":{"title":"Input Height","type":"integer","description":"Image input height accepted by the model (if registered)."},"input_width":{"title":"Input Width","type":"integer","description":"Image input width accepted by the model (if registered)."}}},"ModelsDescriptions":{"title":"ModelsDescriptions","required":["models"],"type":"object","properties":{"models":{"title":"Models","type":"array","items":{"$ref":"#/components/schemas/ModelDescriptionEntity"},"description":"List of models that are loaded by model manager."}}},"MultiLabelClassificationInferenceResponse":{"title":"MultiLabelClassificationInferenceResponse","required":["image","predictions","predicted_classes"],"type":"object","properties":{"visualization":{"title":"Visualization","description":"Base64 encoded string containing prediction visualization image data"},"frame_id":{"title":"Frame Id","type":"integer","description":"The frame id of the image used in inference if the input was a video"},"time":{"title":"Time","type":"number","description":"The time in seconds it took to produce the predictions including image preprocessing"},"image":{"title":"Image","anyOf":[{"type":"array","items":{"$ref":"#/components/schemas/InferenceResponseImage"}},{"$ref":"#/components/schemas/InferenceResponseImage"}]},"predictions":{"title":"Predictions","type":"object","additionalProperties":{"$ref":"#/components/schemas/MultiLabelClassificationPrediction"}},"predicted_classes":{"title":"Predicted Classes","type":"array","items":{"type":"string"},"description":"The list of predicted classes"}},"description":"Multi-label Classification inference response.\n\nAttributes:\n predictions (Dict[str, MultiLabelClassificationPrediction]): Dictionary of multi-label classification predictions.\n predicted_classes (List[str]): The list of predicted classes."},"MultiLabelClassificationPrediction":{"title":"MultiLabelClassificationPrediction","required":["confidence"],"type":"object","properties":{"confidence":{"title":"Confidence","type":"number","description":"The class label confidence as a fraction between 0 and 1"}},"description":"Multi-label Classification prediction.\n\nAttributes:\n confidence (float): The class label confidence as a fraction between 0 and 1."},"ObjectDetectionInferenceRequest":{"title":"ObjectDetectionInferenceRequest","required":["image"],"type":"object","properties":{"model_id":{"title":"Model Id","type":"string","description":"A unique model 
identifier","example":"raccoon-detector-1"},"model_type":{"title":"Model Type","type":"string","description":"The type of the model, usually referring to what task the model performs","example":"object-detection"},"api_key":{"title":"Api Key","type":"string","description":"Roboflow API Key that will be passed to the model during initialization for artifact retrieval"},"image":{"title":"Image","anyOf":[{"type":"array","items":{"$ref":"#/components/schemas/InferenceRequestImage"}},{"$ref":"#/components/schemas/InferenceRequestImage"}]},"disable_preproc_auto_orient":{"title":"Disable Preproc Auto Orient","type":"boolean","description":"If true, the auto orient preprocessing step is disabled for this call.","default":false},"disable_preproc_contrast":{"title":"Disable Preproc Contrast","type":"boolean","description":"If true, the auto contrast preprocessing step is disabled for this call.","default":false},"disable_preproc_grayscale":{"title":"Disable Preproc Grayscale","type":"boolean","description":"If true, the grayscale preprocessing step is disabled for this call.","default":false},"disable_preproc_static_crop":{"title":"Disable Preproc Static Crop","type":"boolean","description":"If true, the static crop preprocessing step is disabled for this call.","default":false},"class_agnostic_nms":{"title":"Class Agnostic Nms","type":"boolean","description":"If true, NMS is applied to all detections at once, if false, NMS is applied per class","default":false,"example":false},"class_filter":{"title":"Class Filter","type":"array","items":{"type":"string"},"description":"If provided, only predictions for the listed classes will be returned","example":["class-1","class-2","class-n"]},"confidence":{"title":"Confidence","type":"number","description":"The confidence threshold used to filter out predictions","default":0.0,"example":0.5},"fix_batch_size":{"title":"Fix Batch Size","type":"boolean","description":"If true, the batch size will be fixed to the maximum batch size configured for this server","default":false,"example":false},"iou_threshold":{"title":"Iou Threshold","type":"number","description":"The IoU threhsold that must be met for a box pair to be considered duplicate during NMS","default":1.0,"example":0.5},"max_detections":{"title":"Max Detections","type":"integer","description":"The maximum number of detections that will be returned","default":300,"example":300},"max_candidates":{"title":"Max Candidates","type":"integer","description":"The maximum number of candidate detections passed to NMS","default":3000},"visualization_labels":{"title":"Visualization Labels","type":"boolean","description":"If true, labels will be rendered on prediction visualizations","default":false,"example":false},"visualization_stroke_width":{"title":"Visualization Stroke Width","type":"integer","description":"The stroke width used when visualizing predictions","default":1,"example":1},"visualize_predictions":{"title":"Visualize Predictions","type":"boolean","description":"If true, the predictions will be drawn on the original image and returned as a base64 string","default":false,"example":false}},"description":"Object Detection inference request.\n\nAttributes:\n class_agnostic_nms (Optional[bool]): If true, NMS is applied to all detections at once, if false, NMS is applied per class.\n class_filter (Optional[List[str]]): If provided, only predictions for the listed classes will be returned.\n confidence (Optional[float]): The confidence threshold used to filter out predictions.\n fix_batch_size (Optional[bool]): 
If true, the batch size will be fixed to the maximum batch size configured for this server.\n iou_threshold (Optional[float]): The IoU threshold that must be met for a box pair to be considered duplicate during NMS.\n max_detections (Optional[int]): The maximum number of detections that will be returned.\n max_candidates (Optional[int]): The maximum number of candidate detections passed to NMS.\n visualization_labels (Optional[bool]): If true, labels will be rendered on prediction visualizations.\n visualization_stroke_width (Optional[int]): The stroke width used when visualizing predictions.\n visualize_predictions (Optional[bool]): If true, the predictions will be drawn on the original image and returned as a base64 string."},"ObjectDetectionInferenceResponse":{"title":"ObjectDetectionInferenceResponse","required":["image","predictions"],"type":"object","properties":{"visualization":{"title":"Visualization","description":"Base64 encoded string containing prediction visualization image data"},"frame_id":{"title":"Frame Id","type":"integer","description":"The frame id of the image used in inference if the input was a video"},"time":{"title":"Time","type":"number","description":"The time in seconds it took to produce the predictions including image preprocessing"},"image":{"title":"Image","anyOf":[{"type":"array","items":{"$ref":"#/components/schemas/InferenceResponseImage"}},{"$ref":"#/components/schemas/InferenceResponseImage"}]},"predictions":{"title":"Predictions","type":"array","items":{"$ref":"#/components/schemas/ObjectDetectionPrediction"}}},"description":"Object Detection inference response.\n\nAttributes:\n predictions (List[ObjectDetectionPrediction]): List of object detection predictions."},"ObjectDetectionPrediction":{"title":"ObjectDetectionPrediction","required":["x","y","width","height","confidence","class","class_id"],"type":"object","properties":{"x":{"title":"X","type":"number","description":"The center x-axis pixel coordinate of the prediction"},"y":{"title":"Y","type":"number","description":"The center y-axis pixel coordinate of the prediction"},"width":{"title":"Width","type":"number","description":"The width of the prediction bounding box in number of pixels"},"height":{"title":"Height","type":"number","description":"The height of the prediction bounding box in number of pixels"},"confidence":{"title":"Confidence","type":"number","description":"The detection confidence as a fraction between 0 and 1"},"class":{"title":"Class","type":"string","description":"The predicted class label"},"class_confidence":{"title":"Class Confidence","type":"number","description":"The class label confidence as a fraction between 0 and 1"},"class_id":{"title":"Class Id","type":"integer","description":"The class id of the prediction"},"tracker_id":{"title":"Tracker Id","type":"integer","description":"The tracker id of the prediction if tracking is enabled"}},"description":"Object Detection prediction.\n\nAttributes:\n x (float): The center x-axis pixel coordinate of the prediction.\n y (float): The center y-axis pixel coordinate of the prediction.\n width (float): The width of the prediction bounding box in number of pixels.\n height (float): The height of the prediction bounding box in number of pixels.\n confidence (float): The detection confidence as a fraction between 0 and 1.\n class_name (str): The predicted class label.\n class_confidence (Union[float, None]): The class label confidence as a fraction between 0 and 1.\n class_id (int): The class id of the 
prediction"},"Point":{"title":"Point","required":["x","y"],"type":"object","properties":{"x":{"title":"X","type":"number","description":"The x-axis pixel coordinate of the point"},"y":{"title":"Y","type":"number","description":"The y-axis pixel coordinate of the point"}},"description":"Point coordinates.\n\nAttributes:\n x (float): The x-axis pixel coordinate of the point.\n y (float): The y-axis pixel coordinate of the point."},"Point3D":{"title":"Point3D","required":["x","y","z"],"type":"object","properties":{"x":{"title":"X","type":"number","description":"The x-axis pixel coordinate of the point"},"y":{"title":"Y","type":"number","description":"The y-axis pixel coordinate of the point"},"z":{"title":"Z","type":"number","description":"The z-axis pixel coordinate of the point"}},"description":"3D Point coordinates.\n\nAttributes:\n z (float): The z-axis pixel coordinate of the point."},"SamEmbeddingRequest":{"title":"SamEmbeddingRequest","type":"object","properties":{"api_key":{"title":"Api Key","type":"string","description":"Roboflow API Key that will be passed to the model during initialization for artifact retrieval"},"sam_version_id":{"title":"Sam Version Id","type":"string","description":"The version ID of SAM to be used for this request. Must be one of vit_h, vit_l, or vit_b.","default":"vit_h","example":"vit_h"},"image":{"title":"Image","allOf":[{"$ref":"#/components/schemas/InferenceRequestImage"}],"description":"The image to be embedded"},"image_id":{"title":"Image Id","type":"string","description":"The ID of the image to be embedded used to cache the embedding.","example":"image_id"},"format":{"title":"Format","type":"string","description":"The format of the response. Must be one of json or binary. If binary, embedding is returned as a binary numpy array.","default":"json","example":"json"}},"description":"SAM embedding request.\n\nAttributes:\n image (Optional[InferenceRequestImage]): The image to be embedded.\n image_id (Optional[str]): The ID of the image to be embedded used to cache the embedding.\n format (Optional[str]): The format of the response. Must be one of json or binary."},"SamEmbeddingResponse":{"title":"SamEmbeddingResponse","required":["time"],"type":"object","properties":{"embeddings":{"title":"Embeddings","anyOf":[{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"number"}}}}},{}],"description":"If request format is json, embeddings is a series of nested lists representing the SAM embedding. If request format is binary, embeddings is a binary numpy array. The dimensions of the embedding are 1 x 256 x 64 x 64.","example":"[[[[0.1, 0.2, 0.3, ...] ...] ...]]"},"time":{"title":"Time","type":"number","description":"The time in seconds it took to produce the embeddings including preprocessing"}},"description":"SAM embedding response.\n\nAttributes:\n embeddings (Union[List[List[List[List[float]]]], Any]): The SAM embedding.\n time (float): The time in seconds it took to produce the embeddings including preprocessing."},"SamSegmentationRequest":{"title":"SamSegmentationRequest","type":"object","properties":{"api_key":{"title":"Api Key","type":"string","description":"Roboflow API Key that will be passed to the model during initialization for artifact retrieval"},"sam_version_id":{"title":"Sam Version Id","type":"string","description":"The version ID of SAM to be used for this request. 
Must be one of vit_h, vit_l, or vit_b.","default":"vit_h","example":"vit_h"},"embeddings":{"title":"Embeddings","anyOf":[{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"number"}}}}},{}],"description":"The embeddings to be decoded. The dimensions of the embeddings are 1 x 256 x 64 x 64. If embeddings is not provided, image must be provided.","example":"[[[[0.1, 0.2, 0.3, ...] ...] ...]]"},"embeddings_format":{"title":"Embeddings Format","type":"string","description":"The format of the embeddings. Must be one of json or binary. If binary, embeddings are expected to be a binary numpy array.","default":"json","example":"json"},"format":{"title":"Format","type":"string","description":"The format of the response. Must be one of json or binary. If binary, masks are returned as binary numpy arrays. If json, masks are converted to polygons, then returned as json.","default":"json","example":"json"},"image":{"title":"Image","allOf":[{"$ref":"#/components/schemas/InferenceRequestImage"}],"description":"The image to be segmented. Only required if embeddings are not provided."},"image_id":{"title":"Image Id","type":"string","description":"The ID of the image to be segmented used to retrieve cached embeddings. If an embedding is cached, it will be used instead of generating a new embedding. If no embedding is cached, a new embedding will be generated and cached.","example":"image_id"},"has_mask_input":{"title":"Has Mask Input","type":"boolean","description":"Whether or not the request includes a mask input. If true, the mask input must be provided.","default":false,"example":true},"mask_input":{"title":"Mask Input","anyOf":[{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"number"}}}},{}],"description":"The set of output masks. If request format is json, masks is a list of polygons, where each polygon is a list of points, where each point is a tuple containing the x,y pixel coordinates of the point. If request format is binary, masks is a list of binary numpy arrays. The dimensions of each mask are 256 x 256. This is the same as the output, low resolution mask from the previous inference."},"mask_input_format":{"title":"Mask Input Format","type":"string","description":"The format of the mask input. Must be one of json or binary. If binary, mask input is expected to be a binary numpy array.","default":"json","example":"json"},"orig_im_size":{"title":"Orig Im Size","type":"array","items":{"type":"integer"},"description":"The original size of the image used to generate the embeddings. This is only required if the image is not provided.","example":[640,320]},"point_coords":{"title":"Point Coords","type":"array","items":{"type":"array","items":{"type":"number"}},"description":"The coordinates of the interactive points used during decoding. Each point (x,y pair) corresponds to a label in point_labels.","default":[[0.0,0.0]],"example":[[10.0,10.0]]},"point_labels":{"title":"Point Labels","type":"array","items":{"type":"number"},"description":"The labels of the interactive points used during decoding. A 1 represents a positive point (part of the object to be segmented). A -1 represents a negative point (not part of the object to be segmented). Each label corresponds to a point in point_coords.","default":[-1],"example":[1]},"use_mask_input_cache":{"title":"Use Mask Input Cache","type":"boolean","description":"Whether or not to use the mask input cache. If true, the mask input cache will be used if it exists. 
If false, the mask input cache will not be used.","default":true,"example":true}},"description":"SAM segmentation request.\n\nAttributes:\n embeddings (Optional[Union[List[List[List[List[float]]]], Any]]): The embeddings to be decoded.\n embeddings_format (Optional[str]): The format of the embeddings.\n format (Optional[str]): The format of the response.\n image (Optional[InferenceRequestImage]): The image to be segmented.\n image_id (Optional[str]): The ID of the image to be segmented used to retrieve cached embeddings.\n has_mask_input (Optional[bool]): Whether or not the request includes a mask input.\n mask_input (Optional[Union[List[List[List[float]]], Any]]): The set of output masks.\n mask_input_format (Optional[str]): The format of the mask input.\n orig_im_size (Optional[List[int]]): The original size of the image used to generate the embeddings.\n point_coords (Optional[List[List[float]]]): The coordinates of the interactive points used during decoding.\n point_labels (Optional[List[float]]): The labels of the interactive points used during decoding.\n use_mask_input_cache (Optional[bool]): Whether or not to use the mask input cache."},"SamSegmentationResponse":{"title":"SamSegmentationResponse","required":["time"],"type":"object","properties":{"masks":{"title":"Masks","anyOf":[{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"integer"}}}},{}],"description":"The set of output masks. If request format is json, masks is a list of polygons, where each polygon is a list of points, where each point is a tuple containing the x,y pixel coordinates of the point. If request format is binary, masks is a list of binary numpy arrays. The dimensions of each mask are the same as the dimensions of the input image."},"low_res_masks":{"title":"Low Res Masks","anyOf":[{"type":"array","items":{"type":"array","items":{"type":"array","items":{"type":"integer"}}}},{}],"description":"The set of output masks. If request format is json, masks is a list of polygons, where each polygon is a list of points, where each point is a tuple containing the x,y pixel coordinates of the point. If request format is binary, masks is a list of binary numpy arrays. 
The dimensions of each mask are 256 x 256"},"time":{"title":"Time","type":"number","description":"The time in seconds it took to produce the segmentation including preprocessing"}},"description":"SAM segmentation response.\n\nAttributes:\n masks (Union[List[List[List[int]]], Any]): The set of output masks.\n low_res_masks (Union[List[List[List[int]]], Any]): The set of output low-resolution masks.\n time (float): The time in seconds it took to produce the segmentation including preprocessing."},"ServerVersionInfo":{"title":"ServerVersionInfo","required":["name","version","uuid"],"type":"object","properties":{"name":{"title":"Name","type":"string","example":"Roboflow Inference Server"},"version":{"title":"Version","type":"string","example":"0.0.1"},"uuid":{"title":"Uuid","type":"string","example":"9c18c6f4-2266-41fb-8a0f-c12ae28f6fbe"}},"description":"Server version information.\n\nAttributes:\n name (str): Server name.\n version (str): Server version.\n uuid (str): Server UUID."},"ValidationError":{"title":"ValidationError","required":["loc","msg","type"],"type":"object","properties":{"loc":{"title":"Location","type":"array","items":{"anyOf":[{"type":"string"},{"type":"integer"}]}},"msg":{"title":"Message","type":"string"},"type":{"title":"Error Type","type":"string"}}}}}}
\ No newline at end of file
diff --git a/mkdocs.yml b/mkdocs.yml
index 8a543d3137..bf98449d05 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -103,6 +103,8 @@ nav:
     - Make Predictions: inference_helpers/cli_commands/infer.md
     - Deploy To Cloud: inference_helpers/cli_commands/cloud.md
     - Reference: inference_helpers/cli_commands/reference.md
+    - HTTP API:
+      - OpenAPI Spec: api.md
   - inference Python Package: reference/inference/
   - Active Learning:
     - Use Active Learning: enterprise/active-learning/active_learning.md
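
The ObjectDetectionInferenceResponse and ObjectDetectionPrediction schemas removed above fully specify the detection payload: x and y are the center pixel coordinates of each box, width and height are pixel extents, and confidence is a fraction between 0 and 1. A minimal sketch of consuming that shape against the documented /infer/object_detection route follows; the server address, model ID, and API key are placeholders, and the {"type": "base64", "value": ...} image envelope is assumed from the InferenceRequestImage schema referenced by the spec.

```python
# Minimal sketch (not part of this diff): consuming the object-detection
# response shape documented in the deleted openapi.json. The server address,
# model_id, and api_key below are placeholders.
import base64

import requests

with open("example.jpg", "rb") as f:  # any local test image
    image_b64 = base64.b64encode(f.read()).decode("utf-8")

response = requests.post(
    "http://localhost:9001/infer/object_detection",
    json={
        "model_id": "some-project/1",        # hypothetical model ID
        "api_key": "YOUR_ROBOFLOW_API_KEY",  # placeholder
        "image": {"type": "base64", "value": image_b64},
    },
)
response.raise_for_status()

# Per ObjectDetectionPrediction: x/y are *center* pixel coordinates,
# width/height are pixel extents, confidence is a 0-1 fraction.
for prediction in response.json()["predictions"]:
    print(
        prediction["class"],
        round(prediction["confidence"], 3),
        prediction["x"], prediction["y"],
        prediction["width"], prediction["height"],
    )
```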
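
The SamEmbeddingRequest and SamSegmentationRequest schemas describe a two-step interactive flow: embed an image once (the embedding is cached under image_id), then decode masks repeatedly from that cached embedding using point prompts, where a point_labels entry of 1 marks a positive point and -1 a negative one. A sketch of that flow, under the assumption that the SAM routes are mounted at /sam/embed_image and /sam/segment_image (the paths section for SAM is not visible in this hunk):

```python
# Minimal sketch (not part of this diff): the two-step SAM flow implied by the
# SamEmbeddingRequest / SamSegmentationRequest schemas. The route paths below
# are assumptions; only the request/response schemas appear in this hunk.
import base64

import requests

BASE = "http://localhost:9001"     # placeholder server address
API_KEY = "YOUR_ROBOFLOW_API_KEY"  # placeholder

with open("example.jpg", "rb") as f:
    image_b64 = base64.b64encode(f.read()).decode("utf-8")

# Step 1: embed once. image_id lets the server cache the embedding so later
# segmentation requests can reuse it instead of re-encoding the image.
requests.post(
    f"{BASE}/sam/embed_image",
    json={
        "api_key": API_KEY,
        "sam_version_id": "vit_h",  # must be one of vit_h, vit_l, vit_b
        "image": {"type": "base64", "value": image_b64},
        "image_id": "example-1",
        "format": "json",
    },
).raise_for_status()

# Step 2: decode masks from the cached embedding with interactive points.
# Per the schema, each point_labels entry pairs with a point in point_coords:
# 1 = part of the object, -1 = not part of the object.
segmentation = requests.post(
    f"{BASE}/sam/segment_image",
    json={
        "api_key": API_KEY,
        "sam_version_id": "vit_h",
        "image": {"type": "base64", "value": image_b64},
        "image_id": "example-1",  # hits the cached embedding from step 1
        "point_coords": [[10.0, 10.0]],
        "point_labels": [1],
        "format": "json",         # masks come back as polygons
    },
).json()

masks = segmentation["masks"]
```

With format set to json, each entry in masks is a polygon (a list of x,y points) per the SamSegmentationResponse schema; binary instead returns serialized numpy arrays, sized to the input image for masks and fixed at 256 x 256 for low_res_masks.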
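
Finally, the ValidationError schema that closes the deleted spec is FastAPI's standard 422 entry (loc, msg, type). A small sketch of surfacing those entries, assuming the usual HTTPValidationError wrapper that nests them under a detail key:

```python
# Minimal sketch (not part of this diff): reading the 422 body described by
# the ValidationError schema. The server address is a placeholder.
import requests

resp = requests.post(
    "http://localhost:9001/infer/object_detection",
    json={},  # deliberately invalid: required fields are missing
)
if resp.status_code == 422:
    for err in resp.json()["detail"]:
        # loc is the path to the offending field, e.g. ["body", "image"];
        # msg is the human-readable reason, type the machine-readable code.
        print(" -> ".join(str(part) for part in err["loc"]), "|", err["msg"])
```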