Fix tokenizer
app.py
CHANGED
@@ -36,7 +36,7 @@ else:
     model_id, device_map="auto", load_in_8bit=True, use_auth_token=HF_TOKEN
 )

-tokenizer = AutoTokenizer.from_pretrained(model_id)
+tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=HF_TOKEN)

 PROMPT_TEMPLATE = """Question: {prompt}\n\nAnswer: """

@@ -108,18 +108,10 @@ def generate(instruction, temperature, max_new_tokens, top_p, length_penalty):


 examples = [
-    "
-    "
-
-
-
-    Am 28. April 2021 rief das Unternehmen in Zusammenarbeit mit mehreren anderen Forschungsgruppen den BigScience Research Workshop ins Leben, um ein offenes großes Sprachmodell zu veröffentlichen.[4] Im Jahr 2022 wurde der Workshop mit der Ankündigung von BLOOM abgeschlossen, einem mehrsprachigen großen Sprachmodell mit 176 Milliarden Parametern.[5]"
-
-    Frage: Wann wurde Hugging Face gegründet?""",
-    "Erklären Sie, was eine API ist.",
-    "Bitte beantworten Sie die folgende Frage. Wer wird der nächste Ballon d'or sein?",
-    "Beantworten Sie die folgende Ja/Nein-Frage, indem Sie Schritt für Schritt argumentieren. Kannst du ein ganzes Haiku in einem einzigen Tweet schreiben?",
-    "Schreibe eine Produktbeschreibung für einen LG 43UQ75009LF 109 cm (43 Zoll) UHD Fernseher (Active HDR, 60 Hz, Smart TV) [Modelljahr 2022]",
+    "How do I create an array in C++ of length 5 which contains all even numbers between 1 and 10?",
+    "How can I write a Java function to generate the nth Fibonacci number?",
+    "How can I write a Python function that checks if a given number is a palindrome or not?",
+    "What is the output of the following code?\n\n```\nlist1 = ['a', 'b', 'c']\nlist2 = [1, 2, 3]\n\nfor x, y in zip(list1, list2):\n print(x * y)\n```",
 ]


@@ -135,12 +127,12 @@ with gr.Blocks(theme=theme) as demo:
     )
     with gr.Row():
         with gr.Column(scale=3):
-            instruction = gr.Textbox(placeholder="
+            instruction = gr.Textbox(placeholder="Enter your question here", label="Question")
             output = gr.Textbox(
                 interactive=False,
                 lines=8,
-                label="
-                placeholder="
+                label="Answer",
+                placeholder="Here will be the answer to your question",
             )
             submit = gr.Button("Generate", variant="primary")
             gr.Examples(examples=examples, inputs=[instruction])
@@ -153,7 +145,7 @@ with gr.Blocks(theme=theme) as demo:
                 maximum=1.0,
                 step=0.1,
                 interactive=True,
-                info="
+                info="Higher values produce more diverse outputs",
             )
             max_new_tokens = gr.Slider(
                 label="Max new tokens",
@@ -165,13 +157,13 @@ with gr.Blocks(theme=theme) as demo:
                 info="The maximum numbers of new tokens",
             )
             top_p = gr.Slider(
-                label="Top
+                label="Top-p (nucleus sampling)",
                 value=0.9,
                 minimum=0.0,
                 maximum=1,
                 step=0.05,
                 interactive=True,
-                info="
+                info="Higher values sample fewer low-probability tokens",
             )
             length_penalty = gr.Slider(
                 label="Length penalty",
@@ -180,7 +172,7 @@ with gr.Blocks(theme=theme) as demo:
                 maximum=10.0,
                 step=0.1,
                 interactive=True,
-                info="> 0
+                info="> 0 longer, < 0 shorter",
             )

    submit.click(generate, inputs=[instruction, temperature, max_new_tokens, top_p, length_penalty], outputs=[output])
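For context on the fix itself: the model was already being loaded with use_auth_token=HF_TOKEN, but the tokenizer was loaded without it, so AutoTokenizer.from_pretrained could not access a gated or private repo and the Space failed at startup. Below is a minimal sketch of the corrected loading pattern; the model class and model_id value are placeholders (the diff does not show them), and the token is assumed to come from the Space's environment.

import os

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Assumptions for this sketch (not shown in the diff): the token lives in the
# environment, and the model is a seq2seq model hosted in a gated/private repo.
HF_TOKEN = os.environ.get("HF_TOKEN")
model_id = "your-org/your-gated-model"  # hypothetical placeholder

# The model load already passed the token (unchanged lines of the diff).
model = AutoModelForSeq2SeqLM.from_pretrained(
    model_id, device_map="auto", load_in_8bit=True, use_auth_token=HF_TOKEN
)

# The fix: the tokenizer of a gated/private repo needs the token as well,
# otherwise this call fails at startup and the Space never comes up.
tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=HF_TOKEN)

Recent transformers releases also accept token= for the same purpose; use_auth_token= still works but is deprecated.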