@@ -39,8 +39,8 @@ using JSON metadata:
 ``` py
 from datachain import Column, DataChain
 
-meta = DataChain.from_json("gs://datachain-demo/dogs-and-cats/*json", object_name="meta")
-images = DataChain.from_storage("gs://datachain-demo/dogs-and-cats/*jpg")
+meta = DataChain.from_json("gs://datachain-demo/dogs-and-cats/*json", object_name="meta", anon=True)
+images = DataChain.from_storage("gs://datachain-demo/dogs-and-cats/*jpg", anon=True)
 
 images_id = images.map(id=lambda file: file.path.split('.')[-2])
 annotated = images_id.merge(meta, on="id", right_on="meta.id")
@@ -78,7 +78,7 @@ def is_positive_dialogue_ending(file) -> bool:
 
 chain = (
     DataChain.from_storage("gs://datachain-demo/chatbot-KiT/",
-                           object_name="file", type="text")
+                           object_name="file", type="text", anon=True)
     .settings(parallel=8, cache=True)
     .map(is_positive=is_positive_dialogue_ending)
     .save("file_response")
@@ -132,7 +132,7 @@ def eval_dialogue(file: File) -> bool:
     return result.lower().startswith("success")
 
 chain = (
-    DataChain.from_storage("gs://datachain-demo/chatbot-KiT/", object_name="file")
+    DataChain.from_storage("gs://datachain-demo/chatbot-KiT/", object_name="file", anon=True)
     .map(is_success=eval_dialogue)
     .save("mistral_files")
 )
@@ -177,7 +177,7 @@ def eval_dialog(file: File) -> ChatCompletionResponse:
                   {"role": "user", "content": file.read()}])
 
 chain = (
-    DataChain.from_storage("gs://datachain-demo/chatbot-KiT/", object_name="file")
+    DataChain.from_storage("gs://datachain-demo/chatbot-KiT/", object_name="file", anon=True)
     .settings(parallel=4, cache=True)
     .map(response=eval_dialog)
     .map(status=lambda response: response.choices[0].message.content.lower()[:7])
@@ -273,7 +273,7 @@ from datachain import C, DataChain
 processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
 
 chain = (
-    DataChain.from_storage("gs://datachain-demo/dogs-and-cats/", type="image")
+    DataChain.from_storage("gs://datachain-demo/dogs-and-cats/", type="image", anon=True)
     .map(label=lambda name: name.split(".")[0], params=["file.name"])
     .select("file", "label").to_pytorch(
         transform=processor.image_processor,
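
Every hunk above makes the same change: `anon=True` is added to the `from_json`/`from_storage` calls so the README examples can read the public `gs://datachain-demo` bucket without GCP credentials. Below is a minimal sketch of the change in isolation, assuming `anon=True` is forwarded to the underlying filesystem client the way fsspec/gcsfs handle their `anon` flag; the trailing `.show()` preview is only there to confirm the anonymous listing worked:

``` py
from datachain import DataChain

# Read a public bucket anonymously: with anon=True no GCP credentials are
# looked up, so the example runs on machines without gcloud configured.
# (Assumption: the flag is passed through to the gcsfs-style `anon` option.)
chain = DataChain.from_storage(
    "gs://datachain-demo/dogs-and-cats/*jpg",
    anon=True,
)

chain.show()  # preview the matched files to confirm anonymous access works
```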