diff --git a/README.md b/README.md
index ca4ca63..e38e400 100644
--- a/README.md
+++ b/README.md
@@ -100,14 +100,14 @@ pip install "rembg[cli]" # for library + cli
 
 Otherwise, install `rembg` with explicit CPU/GPU support.
 
-CPU support:
+### CPU support:
 
 ```bash
 pip install rembg[cpu] # for library
 pip install "rembg[cpu,cli]" # for library + cli
 ```
 
-GPU support:
+### GPU support:
 
 First of all, you need to check if your system supports the `onnxruntime-gpu`.
 
@@ -124,6 +124,8 @@ pip install "rembg[gpu]" # for library
 pip install "rembg[gpu,cli]" # for library + cli
 ```
 
+Nvidia GPUs may require `onnxruntime-gpu`, CUDA, and cuDNN-devel ([#668](https://github.com/danielgatis/rembg/issues/668#issuecomment-2689830314)). If `rembg[gpu]` doesn't work properly and you can't install CUDA or cuDNN-devel, use `rembg[cpu]` with `onnxruntime` instead.
+
 ## Usage as a cli
 
 After the installation step you can use rembg just typing `rembg` in your terminal window.
@@ -346,6 +348,8 @@ Try this:
 docker run -v path/to/input:/rembg danielgatis/rembg i input.png path/to/output/output.png
 ```
 
+Note: the Docker image currently supports CPU acceleration only.
+
 ## Models
 
 All models are downloaded and saved in the user home folder in the `.u2net` directory.
diff --git a/rembg/sessions/base.py b/rembg/sessions/base.py
index bfcb115..0d97d44 100644
--- a/rembg/sessions/base.py
+++ b/rembg/sessions/base.py
@@ -13,9 +13,17 @@ class BaseSession:
     def __init__(self, model_name: str, sess_opts: ort.SessionOptions, *args, **kwargs):
         """Initialize an instance of the BaseSession class."""
         self.model_name = model_name
+
+        device_type = ort.get_device()
+        if device_type == 'GPU' and 'CUDAExecutionProvider' in ort.get_available_providers():
+            providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
+        else:
+            providers = ['CPUExecutionProvider']
+
         self.inner_session = ort.InferenceSession(
             str(self.__class__.download_models(*args, **kwargs)),
             sess_options=sess_opts,
+            providers=providers,
         )
 
     def normalize(
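
As a quick sanity check for the provider selection added to `BaseSession.__init__`, here is a minimal sketch that only assumes `onnxruntime` (or `onnxruntime-gpu`) is importable; it prints what the runtime reports and mirrors the same branch the patched constructor takes:

```python
import onnxruntime as ort

# onnxruntime-gpu reports "GPU" here and, with a working CUDA/cuDNN install,
# lists CUDAExecutionProvider; the plain CPU wheel lists CPUExecutionProvider only.
print("device:", ort.get_device())
print("available providers:", ort.get_available_providers())

# Same branch as the patched BaseSession.__init__: prefer CUDA when it is
# actually available, otherwise fall back to the CPU provider.
if ort.get_device() == "GPU" and "CUDAExecutionProvider" in ort.get_available_providers():
    providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
else:
    providers = ["CPUExecutionProvider"]
print("selected providers:", providers)
```

If this prints only `CPUExecutionProvider` even though `rembg[gpu]` is installed, CUDA or cuDNN-devel is missing, which is exactly the situation the new README note describes.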