Skip to content

Commit

Permalink
Fix issues with CI builds (#1589)
Browse files Browse the repository at this point in the history
## Fix issues with CI builds

* Relax version of bitsandbytes
* Add triton to requirements
* A few fixes for using a newer version of torch

## Checklist before requesting a review
- [ ] Add unit tests for this change.
- [ ] Make sure all tests can pass.
- [ ] Update documents if necessary.
- [x] Lint and apply fixes to your code by running `lintrunner -a`
- [ ] Is this a user-facing change? If yes, give a description of this
change to be included in the release notes.
- [ ] Is this PR including examples changes? If yes, please remember to
update [example
documentation](https://github.com/microsoft/Olive/blob/main/docs/source/examples.md)
in a follow-up PR.

## (Optional) Issue link
  • Loading branch information
shaahji authored Jan 31, 2025
1 parent d98186d commit 0942a2d
Show file tree
Hide file tree
Showing 4 changed files with 6 additions and 6 deletions.
2 changes: 1 addition & 1 deletion olive/model/handler/pytorch.py
Original file line number Diff line number Diff line change
Expand Up @@ -151,7 +151,7 @@ def load_model(self, rank: int = None, cache_model: bool = True) -> "torch.nn.Mo
elif self.model_file_format == ModelFileFormat.PYTORCH_TORCH_SCRIPT:
model = torch.jit.load(self.model_path)
elif self.model_file_format == ModelFileFormat.PYTORCH_ENTIRE_MODEL:
model = torch.load(self.model_path)
model = torch.load(self.model_path, weights_only=False)
elif self.model_file_format == ModelFileFormat.PYTORCH_SLICE_GPT_MODEL:
model = self._load_slicegpt_model()
elif self.model_file_format == ModelFileFormat.PYTORCH_STATE_DICT:
Expand Down
4 changes: 2 additions & 2 deletions olive/olive_config.json
Original file line number Diff line number Diff line change
Expand Up @@ -363,13 +363,13 @@
"extra_dependencies": {
"auto-opt": [ "optimum" ],
"azureml": [ "azure-ai-ml>=1.11.1", "azure-keyvault-secrets", "azure-identity", "azureml-fsspec" ],
"bnb": [ "bitsandbytes" ],
"bnb": [ "bitsandbytes", "triton" ],
"capture-onnx-graph": [ "onnxruntime-genai", "optimum" ],
"cpu": [ "onnxruntime" ],
"directml": [ "onnxruntime-directml" ],
"docker": [ "docker" ],
"shared-cache": [ "azure-identity", "azure-storage-blob" ],
"finetune": [ "onnxruntime-genai", "optimum", "accelerate>=0.30.0", "peft", "scipy", "bitsandbytes" ],
"finetune": [ "onnxruntime-genai", "optimum", "accelerate>=0.30.0", "peft", "scipy", "bitsandbytes", "triton" ],
"flash-attn": [ "flash_attn" ],
"gpu": [ "onnxruntime-gpu" ],
"inc": [ "neural-compressor" ],
Expand Down
4 changes: 2 additions & 2 deletions test/requirements-test-gpu.txt
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
-r requirements-test.txt
auto-gptq
autoawq
# only available on Linux currently
bitsandbytes==0.43.3
bitsandbytes
triton
2 changes: 1 addition & 1 deletion test/unit_test/model/test_pytorch_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ def test_load_from_path(torch_load):
model = PyTorchModelHandler(model_path="test_path")

assert model.load_model() == "dummy_pytorch_model"
torch_load.assert_called_once_with("test_path")
torch_load.assert_called_once_with("test_path", weights_only=False)


@patch("olive.model.handler.pytorch.UserModuleLoader")
Expand Down

0 comments on commit 0942a2d

Please sign in to comment.