Skip to content

Commit cd01e34

Browse files
authored
Fix vertex linting (#35463)
* Fix vertex linting
* yapf
* Force input to list; it can't be a lazy sequence
1 parent 6b06823 commit cd01e34

File tree

2 files changed

+5
-5
lines changed

2 files changed

+5
-5
lines changed

sdks/python/apache_beam/ml/rag/embeddings/vertex_ai.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,7 @@
3737
try:
3838
import vertexai
3939
except ImportError:
40-
vertexai = None
40+
vertexai = None # type: ignore[assignment]
4141

4242

4343
class VertexAITextEmbeddings(EmbeddingsManager):

sdks/python/apache_beam/ml/transforms/embeddings/vertex_ai.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -115,7 +115,7 @@ def __init__(
115115
def get_request(
116116
self,
117117
text_batch: Sequence[TextEmbeddingInput],
118-
model: MultiModalEmbeddingModel,
118+
model: TextEmbeddingModel,
119119
throttle_delay_secs: int):
120120
while self.throttler.throttle_request(time.time() * _MSEC_TO_SEC):
121121
LOGGER.info(
@@ -126,7 +126,7 @@ def get_request(
126126

127127
try:
128128
req_time = time.time()
129-
prediction = model.get_embeddings(text_batch)
129+
prediction = model.get_embeddings(list(text_batch))
130130
self.throttler.successful_request(req_time * _MSEC_TO_SEC)
131131
return prediction
132132
except TooManyRequests as e:
@@ -145,11 +145,11 @@ def run_inference(
145145
embeddings = []
146146
batch_size = _BATCH_SIZE
147147
for i in range(0, len(batch), batch_size):
148-
text_batch = batch[i:i + batch_size]
148+
text_batch_strs = batch[i:i + batch_size]
149149
text_batch = [
150150
TextEmbeddingInput(
151151
text=text, title=self.title, task_type=self.task_type)
152-
for text in text_batch
152+
for text in text_batch_strs
153153
]
154154
embeddings_batch = self.get_request(
155155
text_batch=text_batch, model=model, throttle_delay_secs=5)

0 commit comments

Comments
 (0)