Embeddings

The `aisploit.embeddings` module exposes embedding classes. Each class listed below inherits from an external base class of the same short name (e.g. `aisploit.embeddings.BedrockEmbeddings` subclasses a distinct `BedrockEmbeddings` base) together with the `Embeddings` interface, so the repeated name in each "Bases:" line refers to the underlying implementation, not to the class itself.

class aisploit.embeddings.BedrockEmbeddings(*, model_id: str = 'amazon.titan-embed-text-v1', client: Any = None, region_name: str | None = None, credentials_profile_name: str | None = None, model_kwargs: Dict | None = None, endpoint_url: str | None = None, normalize: bool = False)

Bases: BedrockEmbeddings, Embeddings

class aisploit.embeddings.GoogleGenerativeAIEmbeddings(*, api_key: str | None = None, model: str = 'models/embedding-001', client: Any = None, task_type: str | None = None, google_api_key: SecretStr | None = None, credentials: Any = None, client_options: Dict | None = None, transport: str | None = None, request_options: Dict | None = None)

Bases: GoogleGenerativeAIEmbeddings, Embeddings

class aisploit.embeddings.HuggingFaceEmbeddings(*, model_name: str = 'sentence-transformers/all-MiniLM-L6-v2', client: Any = None, cache_folder: str | None = None, model_kwargs: Dict[str, Any] = None, encode_kwargs: Dict[str, Any] = None, multi_process: bool = False, show_progress: bool = False)

Bases: HuggingFaceEmbeddings, Embeddings

class aisploit.embeddings.OllamaEmbeddings(*, model: str = 'llama2', base_url: str = 'http://localhost:11434', embed_instruction: str = 'passage: ', query_instruction: str = 'query: ', mirostat: int | None = None, mirostat_eta: float | None = None, mirostat_tau: float | None = None, num_ctx: int | None = None, num_gpu: int | None = None, num_thread: int | None = None, repeat_last_n: int | None = None, repeat_penalty: float | None = None, temperature: float | None = None, stop: List[str] | None = None, tfs_z: float | None = None, top_k: int | None = None, top_p: float | None = None, show_progress: bool = False, headers: dict | None = None, model_kwargs: dict | None = None)

Bases: OllamaEmbeddings, Embeddings

class aisploit.embeddings.OpenAIEmbeddings(*, api_key: str | None = None, model: str = 'text-embedding-ada-002', client: Any = None, async_client: Any = None, dimensions: int | None = None, deployment: str | None = 'text-embedding-ada-002', api_version: str | None = None, base_url: str | None = None, openai_api_type: str | None = None, openai_proxy: str | None = None, embedding_ctx_length: int = 8191, organization: str | None = None, allowed_special: Literal['all'] | Set[str] | None = None, disallowed_special: Literal['all'] | Set[str] | Sequence[str] | None = None, chunk_size: int = 1000, max_retries: int = 2, timeout: float | Tuple[float, float] | Any | None = None, headers: Any = None, tiktoken_enabled: bool = True, tiktoken_model_name: str | None = None, show_progress_bar: bool = False, model_kwargs: Dict[str, Any] = None, skip_empty: bool = False, default_headers: Mapping[str, str] | None = None, default_query: Mapping[str, object] | None = None, retry_min_seconds: int = 4, retry_max_seconds: int = 20, http_client: Any | None = None, http_async_client: Any | None = None, check_embedding_ctx_length: bool = True)

Bases: OpenAIEmbeddings, Embeddings