@@ -18,10 +18,10 @@ from llama_index.readers.weaviate.utils import (
 
 
 class WeaviateVectorStoreClient(BaseVectorStoreClient):
-    def __init__(self, endpoint: str, api_key: str, grpc_enabled: bool):
-        self._client = self.init_from_config(endpoint, api_key, grpc_enabled)
+    def __init__(self, endpoint: str, api_key: str, grpc_enabled: bool, batch_size: int):
+        self._client = self.init_from_config(endpoint, api_key, grpc_enabled, batch_size)
 
-    def init_from_config(self, endpoint: str, api_key: str, grpc_enabled: bool):
+    def init_from_config(self, endpoint: str, api_key: str, grpc_enabled: bool, batch_size: int):
         auth_config = weaviate.auth.AuthApiKey(api_key=api_key)
 
         weaviate.connect.connection.has_grpc = grpc_enabled
@@ -36,7 +36,7 @@ class WeaviateVectorStoreClient(BaseVectorStoreClient):
         client.batch.configure(
             # `batch_size` takes an `int` value to enable auto-batching
             # (`None` is used for manual batching)
-            batch_size=100,
+            batch_size=batch_size,
             # dynamically update the `batch_size` based on import speed
             dynamic=True,
             # `timeout_retries` takes an `int` value to retry on time outs
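
For context, a minimal usage sketch of the new constructor signature. The endpoint, API key, and batch size values below are placeholders, not taken from this diff; only the parameter names and their wiring come from the change above.

# Hypothetical usage: batch_size is now caller-configurable instead of the
# previous hardcoded 100, and is forwarded through init_from_config() to
# client.batch.configure(batch_size=...).
client = WeaviateVectorStoreClient(
    endpoint="https://example-cluster.weaviate.network",  # placeholder endpoint
    api_key="YOUR_WEAVIATE_API_KEY",                       # placeholder key
    grpc_enabled=False,
    batch_size=200,
)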