
Commit 1f0a3d7

Added CLI
1 parent 378d825 commit 1f0a3d7

9 files changed: 95 additions, 9 deletions

README.md

Lines changed: 6 additions & 6 deletions
@@ -16,13 +16,13 @@ This library provides you with an instant, easy-to-use interface for CLIP, allow
 Ensure that you have Python 3.8 or newer and `pip` installed on your system. We highly recommend using a Virtual Environment to avoid any potential package conflicts.
 
 To install the service, enter the following command:
-```
+```bash
 pip install clip-api-service
 ```
 
 Once the installation process is complete, you can start the service by running:
-```
-clip-api-service serve --model_name=ViT-B-32:openai
+```bash
+clip-api-service serve --model-name=ViT-B-32:openai
 ```
 Your service is now running! Interact with it via the Swagger UI at `localhost:3000`
 ![SwaggerUI](images/swagger-ui.png)
@@ -67,7 +67,7 @@ bentoml yatai login --api-token <your-api-token> --endpoint <bento-cloud-endpoin
 Next, build your BentoML service using the `build` command:
 
 ```bash
-clip-api-service build --model_name=ViT-B-32:openai
+clip-api-service build --model-name=ViT-B-32:openai
 ```
 
 Lastly, push your freshly-built Bento service to BentoCloud using the `push` command:
@@ -181,13 +181,13 @@ And the response looks like:
 Spins up a HTTP Server with the model of your choice.
 
 Arguments:
-* `--model_name` : Name of the CLIP model. Use `list_models` to see the list of available model. Default: `openai/clip-vit-large-patch14`
+* `--model-name` : Name of the CLIP model. Use `list_models` to see the list of available model. Default: `openai/clip-vit-large-patch14`
 
 #### `build`
 Builds a Bento with the model of your choice
 
 Arguments:
-* `--model_name` : Name of the CLIP model. Use `list_models` to see the list of available model. Default: `openai/clip-vit-large-patch14`
+* `--model-name` : Name of the CLIP model. Use `list_models` to see the list of available model. Default: `openai/clip-vit-large-patch14`
 
 #### `list_models`
 List all available CLIP models.

pyproject.toml

Lines changed: 11 additions & 1 deletion
@@ -31,13 +31,23 @@ keywords = [
 ]
 dependencies = [
     "bentoml>=1.0.20",
+    "bentoml",
+    "transformers",
+    "accelerate",
+    "optimum",
+    "pydantic",
+    "Pillow",
+    "open-clip-torch",
+    "torch",
+    "typer",
 ]
 
 [project.urls]
 "Homepage" = "https://github.com/bentoml/CLIP-API-service"
 "Bug Tracker" = "https://github.com/bentoml/CLIP-API-service/issues"
 
-
+[project.scripts]
+clip-api-service = "cli:app"
 
 [tool.pdm.dev-dependencies]
 test = [
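
The new `[project.scripts]` table is what turns the Typer app into the `clip-api-service` command used throughout the README. As a rough illustration (not part of this commit), the console-script wrapper that packaging tools generate for `clip-api-service = "cli:app"` boils down to:

```python
# Rough sketch (not part of this commit): the generated console script imports
# the Typer app from the top-level `cli` module named in [project.scripts]
# and calls it.
from cli import app  # assumes src/cli.py installs as a top-level `cli` module

if __name__ == "__main__":
    app()  # Typer parses sys.argv and dispatches to serve / build / list_models
```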

requirements-dev.txt

Lines changed: 1 addition & 0 deletions
@@ -6,3 +6,4 @@ pydantic
 Pillow
 open_clip_torch
 torch
+typer

src/cli.py

Lines changed: 34 additions & 0 deletions
@@ -0,0 +1,34 @@
+import os
+import typer
+import subprocess
+
+from typing_extensions import Annotated
+from clip_api_service import list_models as _list_models
+from clip_api_service.build import build_bento
+from clip_api_service.models import MODEL_ENV_VAR_KEY, DEFAULT_MODEL_NAME
+app = typer.Typer()
+
+@app.command()
+def serve(model_name: Annotated[str, typer.Option(help="CLIP Model name")] = DEFAULT_MODEL_NAME):
+    env = os.environ.copy()
+    env[MODEL_ENV_VAR_KEY] = model_name
+
+    try:
+        subprocess.run(["bentoml", "serve", "clip_api_service.service:svc"], env=env, check=True)
+    except subprocess.CalledProcessError as e:
+        typer.echo(f"Command 'bentoml serve {model_name}' failed with error code {e.returncode}")
+
+
+@app.command()
+def build(
+    model_name: Annotated[str, typer.Option(help="CLIP Model name")] = DEFAULT_MODEL_NAME,
+    use_gpu: Annotated[bool, typer.Option(help="Use GPU for build")] = False,
+):
+    build_bento(model_name=model_name, use_gpu=use_gpu)
+
+@app.command()
+def list_models():
+    print(_list_models())
+
+if __name__ == "__main__":
+    app()
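
As a quick sanity check of the new commands, the Typer app can be exercised in-process with `typer.testing.CliRunner`. The following is a minimal sketch (not part of the commit) that only inspects `--help` output, so nothing is actually served or built:

```python
# Minimal smoke test (not part of this commit) for the new CLI, using Typer's
# bundled test runner. Only --help is invoked, so no BentoML server is started
# and no Bento is built.
from typer.testing import CliRunner

from cli import app  # the Typer app defined in src/cli.py

runner = CliRunner()

# The serve command should advertise the renamed --model-name option.
result = runner.invoke(app, ["serve", "--help"])
assert result.exit_code == 0
assert "--model-name" in result.output

# The build command additionally exposes the --use-gpu flag.
result = runner.invoke(app, ["build", "--help"])
assert result.exit_code == 0
assert "--use-gpu" in result.output
```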

src/clip_api_service/bentofiles/bentofile.openai.cpu.yaml

Lines changed: 7 additions & 1 deletion
@@ -2,4 +2,10 @@ service: "clip_api_service.service:svc"
 include:
 - "clip_api_service/*.py"
 python:
-  requirements_txt: "./requirements.txt"
+  packages:
+    - bentoml
+    - transformers
+    - accelerate
+    - optimum
+    - pydantic
+    - Pillow

src/clip_api_service/bentofiles/bentofile.openai.gpu.yaml

Lines changed: 9 additions & 1 deletion
@@ -2,4 +2,12 @@ service: "clip_api_service.service:svc"
 include:
 - "clip_api_service/*.py"
 python:
-  requirements_txt: "./requirements.txt"
+  packages:
+    - bentoml
+    - transformers
+    - accelerate
+    - optimum
+    - pydantic
+    - Pillow
+docker:
+  cuda_version: "11.6.2"

src/clip_api_service/models/openai.py

Lines changed: 10 additions & 0 deletions
@@ -42,6 +42,16 @@ def download_model(model_name: str) -> bentoml.Model:
     )
     return bentoml.models.get(bento_model_tag)
 
+def bentofile_path(use_gpu: bool = False) -> str:
+    import os
+    build_ctx = os.path.dirname(os.path.dirname(__file__))
+
+    declaration = {
+        False: os.path.join(build_ctx, "bentofiles", "bentofile.openai.cpu.yaml"),
+        True: os.path.join(build_ctx, "bentofiles", "bentofile.openai.gpu.yaml"),
+    }
+
+    return declaration[use_gpu]
 
 class OpenAICLIPRunnable(CLIPRunnable):
     SUPPORTED_RESOURCES = ("nvidia.com/gpu", "cpu")
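
The new `bentofile_path` helper simply maps the `use_gpu` flag to one of the bundled bentofiles. Below is a minimal sketch (not part of this commit) of how that path could be consumed through BentoML's public build API; the actual wiring lives in `clip_api_service.build.build_bento`, which is not shown in this diff and may differ:

```python
# Minimal sketch (not part of this commit) of consuming bentofile_path() via
# BentoML's public build API. The real build_bento() implementation may differ.
import bentoml

from clip_api_service.models.openai import bentofile_path, download_model


def build_openai_bento(model_name: str, use_gpu: bool = False) -> bentoml.Bento:
    # Make sure the CLIP weights are saved into the local BentoML model store first.
    download_model(model_name)
    # Pick the CPU or GPU bentofile bundled with the package.
    bentofile = bentofile_path(use_gpu=use_gpu)
    # A build_ctx pointing at the source root may also be needed so the bentofile's
    # `include: clip_api_service/*.py` glob resolves.
    return bentoml.bentos.build_bentofile(bentofile)
```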

src/clip_api_service/models/openclip.py

Lines changed: 11 additions & 0 deletions
@@ -102,6 +102,7 @@
 }
 
 
+
 def get_bento_model_tag(model_name: str) -> bentoml.Tag:
     model_version = model_name.replace(":", ".")
     return bentoml.Tag("openclip", model_version.lower())
@@ -131,6 +132,16 @@ def download_model(model_name: str) -> bentoml.Model:
     )
     return bentoml.models.get(bento_model_tag)
 
+def bentofile_path(use_gpu: bool = False) -> str:
+    import os
+    build_ctx = os.path.dirname(os.path.dirname(__file__))
+
+    declaration = {
+        False: os.path.join(build_ctx, "bentofiles", "bentofile.openclip.cpu.yaml"),
+        True: os.path.join(build_ctx, "bentofiles", "bentofile.openclip.gpu.yaml"),
+    }
+
+    return declaration[use_gpu]
 
 class OpenClipRunnable(CLIPRunnable):
     SUPPORTED_RESOURCES = ("nvidia.com/gpu", "cpu")

src/main.py

Lines changed: 6 additions & 0 deletions
@@ -41,3 +41,9 @@ def test():
 # server = bentoml.HTTPServer(svc)
 # server.start(blocking=False)
 test()
+
+#%%
+from clip_api_service.build import build_bento
+# %%
+build_bento(model_name="ViT-B-32:openai")
+# %%
