bandhit committed
Commit d77cb0c
Parent: 4298836

Added gradio

Files changed (4):
  1. README.md (+1, -1)
  2. app.py (+127, -0)
  3. requirements.txt (+11, -0)
  4. style.css (+16, -0)
README.md CHANGED
@@ -1,5 +1,5 @@
 ---
-title: Typhoon-7b-q4-bnb Cuda
+title: typhoon-7b-q4-bnb_cuda
 emoji: 🏃
 colorFrom: gray
 colorTo: red
app.py ADDED
@@ -0,0 +1,127 @@
#!/usr/bin/env python

import os
from threading import Thread
from typing import Iterator

import gradio as gr
import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

DESCRIPTION = "# Typhoon 7B via 4-bit Quantization"

if not torch.cuda.is_available():
    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"

MAX_MAX_NEW_TOKENS = 2048
DEFAULT_MAX_NEW_TOKENS = 128
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))

if torch.cuda.is_available():
    # Pre-quantized 4-bit (bitsandbytes) Typhoon 7B checkpoint.
    model_id = "bandhit/typhoon-7b-q4-bnb_cuda-ts-1703352224"
    model = AutoModelForCausalLM.from_pretrained(model_id)
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    tokenizer.pad_token_id = tokenizer.eos_token_id


@spaces.GPU
def generate(
    message: str,
    chat_history: list[tuple[str, str]],
    max_new_tokens: int = 128,
    temperature: float = 0.7,
    top_p: float = 0.95,
    top_k: int = 50,
    repetition_penalty: float = 1.3,
) -> Iterator[str]:
    # Rebuild the conversation as role/content messages.
    conversation = []
    for user, assistant in chat_history:
        conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
    conversation.append({"role": "user", "content": message})

    # Llama-2-style instruction template, passed explicitly to the tokenizer.
    chat_template = "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% elif true == true and not '<<SYS>>' in messages[0]['content'] %}{% set loop_messages = messages %}{% set system_message = '' %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<<SYS>>\\n' + system_message + '\\n<</SYS>>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}{% elif message['role'] == 'system' %}{{ '<<SYS>>\\n' + content.strip() + '\\n<</SYS>>\\n\\n' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content.strip() + ' ' + eos_token }}{% endif %}{% endfor %}"
    input_ids = tokenizer.apply_chat_template(conversation, chat_template, return_tensors="pt")
    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
        gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
    input_ids = input_ids.to(model.device)

    # Run generation on a background thread and stream tokens to the UI.
    streamer = TextIteratorStreamer(tokenizer, timeout=20.0, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        {"input_ids": input_ids},
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        top_p=top_p,
        top_k=top_k,
        temperature=temperature,
        num_beams=1,
        repetition_penalty=repetition_penalty,
    )
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()

    outputs = []
    for text in streamer:
        outputs.append(text)
        yield "".join(outputs)


chat_interface = gr.ChatInterface(
    fn=generate,
    additional_inputs=[
        gr.Slider(
            label="Max new tokens",
            minimum=1,
            maximum=MAX_MAX_NEW_TOKENS,
            step=1,
            value=DEFAULT_MAX_NEW_TOKENS,
        ),
        gr.Slider(
            label="Temperature",
            minimum=0.1,
            maximum=4.0,
            step=0.1,
            value=0.7,
        ),
        gr.Slider(
            label="Top-p (nucleus sampling)",
            minimum=0.05,
            maximum=1.0,
            step=0.05,
            value=0.95,
        ),
        gr.Slider(
            label="Top-k",
            minimum=1,
            maximum=1000,
            step=1,
            value=50,
        ),
        gr.Slider(
            label="Repetition penalty",
            minimum=1.0,
            maximum=2.0,
            step=0.05,
            value=1.3,
        ),
    ],
    stop_btn=None,
    examples=[
        ["ชีวิตคืออะไร?"],  # "What is life?"
        ["ความหมายของชีวิตคืออะไร?"],  # "What is the meaning of life?"
    ],
)

with gr.Blocks(css="style.css") as demo:
    gr.Markdown(DESCRIPTION)
    gr.DuplicateButton(
        value="Duplicate Space for private use",
        elem_id="duplicate-button",
        visible=os.getenv("SHOW_DUPLICATE_BUTTON") == "1",
    )
    chat_interface.render()

if __name__ == "__main__":
    demo.queue(max_size=20).launch()
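The long chat_template string in generate() is the stock Llama-2 instruction format, presumably passed explicitly because the checkpoint's tokenizer does not ship its own chat template. A minimal sketch of how to inspect the prompt it renders, assuming the same tokenizer and chat_template objects as above and Mistral-style "<s>"/"</s>" special tokens (not confirmed by this commit):

# Sketch only, not part of the commit: render the template to a plain string
# to see the exact prompt the model receives.
conversation = [
    {"role": "user", "content": "ชีวิตคืออะไร?"},             # "What is life?"
    {"role": "assistant", "content": "(model reply)"},         # placeholder turn
    {"role": "user", "content": "ความหมายของชีวิตคืออะไร?"},  # "What is the meaning of life?"
]
prompt = tokenizer.apply_chat_template(
    conversation,
    chat_template,   # same Jinja template passed positionally in generate()
    tokenize=False,  # return the rendered string instead of token ids
)
print(prompt)
# Expected shape, given bos_token="<s>" and eos_token="</s>":
# <s>[INST] ชีวิตคืออะไร? [/INST] (model reply) </s><s>[INST] ความหมายของชีวิตคืออะไร? [/INST]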
requirements.txt ADDED
@@ -0,0 +1,11 @@
accelerate==0.25.0
bitsandbytes==0.41.3
gradio==4.9.0
protobuf==4.23.4
scipy==1.11.4
sentencepiece==0.1.99
spaces==0.18.0
torch==2.1.2
#transformers==4.37.0.dev0
transformers @ git+https://github.com/huggingface/transformers
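Two pins here appear load-bearing: bitsandbytes 0.41.3 added serialization of 4-bit weights, and transformers is installed from git (with a commented-out 4.37.0.dev0 pin) presumably because loading such pre-quantized checkpoints had not yet shipped in a stable release when this was committed. A minimal sketch of how a checkpoint like the one app.py loads is typically produced; the base repo name, quant type, and compute dtype below are assumptions, not taken from this commit:

# Sketch only: quantize a base model to 4-bit with bitsandbytes and save it,
# so it can later be loaded with a plain from_pretrained() call as in app.py.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                     # store weights in 4 bits via bitsandbytes
    bnb_4bit_quant_type="nf4",             # assumption; "fp4" is the other option
    bnb_4bit_compute_dtype=torch.float16,  # assumption; bfloat16 is also common
)
model = AutoModelForCausalLM.from_pretrained(
    "scb10x/typhoon-7b",                   # assumed base Typhoon 7B checkpoint
    quantization_config=bnb_config,
    device_map="auto",
)
model.save_pretrained("typhoon-7b-q4-bnb_cuda")  # hypothetical local path

Because the quantization config is saved alongside the weights, app.py can then reload the result with AutoModelForCausalLM.from_pretrained(model_id) and no extra arguments.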
style.css ADDED
@@ -0,0 +1,16 @@
h1 {
  text-align: center;
}

#duplicate-button {
  margin: auto;
  color: white;
  background: #1565c0;
  border-radius: 100vh;
}

.contain {
  max-width: 900px;
  margin: auto;
  padding-top: 1.5rem;
}