TiniThingsInc committed
Commit ce958e0 · verified · 1 Parent(s): f9fe5e0

Update app.py

Files changed (1):
  app.py (+67 -144)
app.py CHANGED
@@ -1,5 +1,5 @@
 """
-FF Embeddings API - Qwen3-Embedding-4B
+FairFate Embeddings API - Qwen3-Embedding-4B
 Multilingual semantic embeddings for tabletop RPG product classification
 """
 
@@ -13,9 +13,9 @@ import spaces # ZeroGPU decorator
 # Using Qwen3-Embedding-4B for 2560 native dimensions (truncate to 1536 for production)
 # Qwen3-4B is optimal for 1536 dims: 60% retention (vs 42.9% for GTE-Qwen2-7B)
 MODEL_NAME = "Qwen/Qwen3-Embedding-4B"
-print(f" Loading model: {MODEL_NAME}")
+print(f"🔄 Loading model: {MODEL_NAME}")
 model = SentenceTransformer(MODEL_NAME, trust_remote_code=True)
-print(f" Model loaded successfully")
+print(f"✅ Model loaded successfully")
 print(f" Native Dimensions: {model.get_sentence_embedding_dimension()}")
 print(f" Max Seq Length: {model.max_seq_length}")
 print(f" Matryoshka Support: Yes (truncate to any dimension ≤ {model.get_sentence_embedding_dimension()})")
@@ -35,7 +35,7 @@ def generate_embeddings(
     Args:
         texts: Single string or list of strings
        use_instruction: Whether to prepend instruction prefix (recommended)
-        output_dimensions: Output embedding size (32-3584, default 1536 for production)
+        output_dimensions: Output embedding size (32-2560, default 1536 for production)
 
     Returns:
         List of embedding vectors (L2 normalized)
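The body of `generate_embeddings` between this docstring and the truncation step in the next hunk is outside the diff's context window. A minimal sketch of what that encode call presumably looks like, assuming the standard `sentence-transformers` API; `batch_size=32` is an assumption, and `normalize_embeddings=True` is what would make the dot-product cosine later in the file valid:

```python
# Editor's sketch, not part of the commit: the encode call the truncation
# step below operates on. normalize_embeddings=True yields unit-length
# vectors, so cosine similarity reduces to a plain dot product.
embeddings = model.encode(
    texts,
    batch_size=32,              # assumption; the actual batch size is not shown
    normalize_embeddings=True,  # L2-normalize, matching the docstring's promise
    convert_to_numpy=True,
)
```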
@@ -64,113 +64,36 @@ def generate_embeddings(
     # Qwen3-Embedding models support truncation to any dimension ≤ native_dims
     if output_dimensions != native_dims:
         if output_dimensions > native_dims:
-            print(f"Warning: Requested {output_dimensions} dims but model has {native_dims}. Using {native_dims}.")
+            print(f"⚠️ Warning: Requested {output_dimensions} dims but model has {native_dims}. Using {native_dims}.")
             output_dimensions = native_dims
         embeddings = embeddings[:, :output_dimensions]
 
     # Convert to list for JSON serialization
     return embeddings.tolist()
 
-def batch_generate(texts_input: str, use_instruction: bool, output_dims: int) -> str:
+def batch_generate(texts_input: str, use_instruction: bool, output_dims: int):
     """
     Gradio interface for batch embedding generation
     Expects newline-separated texts
     """
     if not texts_input.strip():
-        return "❌ Error: Please provide at least one text"
+        return {"error": "Please provide at least one text"}
 
     texts = [t.strip() for t in texts_input.split('\n') if t.strip()]
 
     try:
         embeddings = generate_embeddings(texts, use_instruction, output_dims)
-
-        result = f"Generated {len(embeddings)} embeddings\n"
-        result += f"Dimensions: {len(embeddings[0])}\n"
-        result += f"Languages: 100+ supported\n\n"
-        result += "First embedding preview:\n"
-        result += f"[{', '.join(f'{x:.3f}' for x in embeddings[0][:10])}...]\n"
-
-        return result
+        return embeddings
     except Exception as e:
-        return f"❌ Error: {str(e)}"
+        return {"error": str(e)}
 
-def calculate_all_similarities(emb1: np.ndarray, emb2: np.ndarray) -> dict:
-    """
-    Calculate comprehensive similarity metrics between two embeddings
-    """
-    # Cosine Similarity (for normalized vectors, just dot product)
-    cosine = float(np.dot(emb1, emb2))
-
-    # Euclidean Distance
-    euclidean_dist = float(np.linalg.norm(emb1 - emb2))
-    euclidean_sim = 1 / (1 + euclidean_dist)
-
-    # Jaccard Similarity (min/max interpretation for continuous vectors)
-    intersection = np.sum(np.minimum(np.abs(emb1), np.abs(emb2)))
-    union = np.sum(np.maximum(np.abs(emb1), np.abs(emb2)))
-    jaccard = float(intersection / union if union > 0 else 0)
-
-    # Sorensen-Dice Coefficient
-    intersection = np.sum(np.minimum(np.abs(emb1), np.abs(emb2)))
-    sum_magnitudes = np.sum(np.abs(emb1)) + np.sum(np.abs(emb2))
-    sorensen_dice = float(2 * intersection / sum_magnitudes if sum_magnitudes > 0 else 0)
-
-    # Manhattan Distance
-    manhattan = float(np.sum(np.abs(emb1 - emb2)))
-
-    # Pearson Correlation
-    pearson = float(np.corrcoef(emb1, emb2)[0, 1])
-
-    return {
-        'cosine': cosine,
-        'euclidean_distance': euclidean_dist,
-        'euclidean_similarity': euclidean_sim,
-        'jaccard': jaccard,
-        'sorensen_dice': sorensen_dice,
-        'manhattan': manhattan,
-        'pearson': pearson
-    }
-
-def interpret_similarity(score: float, metric: str) -> tuple[str, str]:
-    """
-    Interpret similarity score with emoji and description
-    Returns: (emoji, description)
-    """
-    if metric in ['cosine', 'jaccard', 'sorensen_dice', 'euclidean_similarity']:
-        if score > 0.9:
-            return '🟢', 'Nearly Identical'
-        elif score > 0.7:
-            return '🟢', 'Very Similar'
-        elif score > 0.5:
-            return '🟡', 'Moderately Similar'
-        elif score > 0.3:
-            return '🟠', 'Somewhat Similar'
-        else:
-            return '🔴', 'Different'
-    elif metric == 'pearson':
-        if score > 0.9:
-            return '🟢', 'Strong Positive Correlation'
-        elif score > 0.7:
-            return '🟡', 'Moderate Positive Correlation'
-        elif score > 0.3:
-            return '🟠', 'Weak Positive Correlation'
-        elif score > -0.3:
-            return '⚪', 'No Correlation'
-        elif score > -0.7:
-            return '🟠', 'Weak Negative Correlation'
-        elif score > -0.9:
-            return '🟡', 'Moderate Negative Correlation'
-        else:
-            return '🔴', 'Strong Negative Correlation'
-    else:
-        return '⚪', 'Unknown'
-
-def calculate_similarity(text1: str, text2: str, use_instruction: bool) -> str:
+def calculate_similarity(text1: str, text2: str, use_instruction: bool) -> dict:
     """
     Calculate comprehensive similarity metrics between two texts
+    Returns dict with all similarity metrics
     """
     if not text1.strip() or not text2.strip():
-        return "Error: Please provide both texts"
+        return {"error": "Please provide both texts"}
 
     try:
         embeddings = generate_embeddings([text1, text2], use_instruction)
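One thing the truncation path above leaves implicit: slicing an L2-normalized vector down to `output_dimensions` components breaks its unit length, while `calculate_similarity` below computes cosine as a bare dot product. A minimal sketch of the renormalization that assumption requires; `renormalize` is a hypothetical helper, not in the commit:

```python
# Editor's sketch: restore unit L2 norm after matryoshka truncation so that
# np.dot(emb1, emb2) is again a true cosine similarity.
import numpy as np

def renormalize(embeddings: np.ndarray) -> np.ndarray:
    """Divide each row by its L2 norm (guarding against zero vectors)."""
    norms = np.linalg.norm(embeddings, axis=1, keepdims=True)
    return embeddings / np.clip(norms, 1e-12, None)

# Example: truncate 2560-dim rows to 1536 dims, then renormalize.
truncated = renormalize(np.random.rand(4, 2560)[:, :1536])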
@@ -178,67 +101,60 @@ def calculate_similarity(text1: str, text2: str, use_instruction: bool) -> str:
         # Calculate all similarity metrics
         emb1 = np.array(embeddings[0])
         emb2 = np.array(embeddings[1])
-        metrics = calculate_all_similarities(emb1, emb2)
 
-        # Build result string
-        result = "**Comprehensive Similarity Analysis**\n\n"
+        # Cosine Similarity (for normalized vectors, just dot product)
+        cosine = float(np.dot(emb1, emb2))
 
-        # Cosine Similarity (Primary)
-        emoji, interpretation = interpret_similarity(metrics['cosine'], 'cosine')
-        result += f"**Cosine Similarity:** {emoji} {metrics['cosine']:.4f}\n"
-        result += f"└─ {interpretation}\n\n"
+        # Euclidean Distance
+        euclidean_dist = float(np.linalg.norm(emb1 - emb2))
+        euclidean_sim = 1 / (1 + euclidean_dist)
 
-        # Jaccard Similarity
-        emoji, interpretation = interpret_similarity(metrics['jaccard'], 'jaccard')
-        result += f"**Jaccard Similarity:** {emoji} {metrics['jaccard']:.4f}\n"
-        result += f"└─ {interpretation}\n\n"
+        # Jaccard Similarity (min/max interpretation for continuous vectors)
+        intersection = np.sum(np.minimum(np.abs(emb1), np.abs(emb2)))
+        union = np.sum(np.maximum(np.abs(emb1), np.abs(emb2)))
+        jaccard = float(intersection / union if union > 0 else 0)
 
         # Sorensen-Dice Coefficient
-        emoji, interpretation = interpret_similarity(metrics['sorensen_dice'], 'sorensen_dice')
-        result += f"**Sørensen-Dice:** {emoji} {metrics['sorensen_dice']:.4f}\n"
-        result += f"└─ {interpretation}\n\n"
-
-        # Euclidean Distance & Similarity
-        result += f"**Euclidean Distance:** {metrics['euclidean_distance']:.4f}\n"
-        emoji, interpretation = interpret_similarity(metrics['euclidean_similarity'], 'euclidean_similarity')
-        result += f"**Euclidean Similarity:** {emoji} {metrics['euclidean_similarity']:.4f}\n"
-        result += f"└─ {interpretation}\n\n"
+        intersection = np.sum(np.minimum(np.abs(emb1), np.abs(emb2)))
+        sum_magnitudes = np.sum(np.abs(emb1)) + np.sum(np.abs(emb2))
+        sorensen_dice = float(2 * intersection / sum_magnitudes if sum_magnitudes > 0 else 0)
 
         # Manhattan Distance
-        result += f"**Manhattan Distance:** {metrics['manhattan']:.2f}\n\n"
+        manhattan = float(np.sum(np.abs(emb1 - emb2)))
 
         # Pearson Correlation
-        emoji, interpretation = interpret_similarity(metrics['pearson'], 'pearson')
-        result += f"**Pearson Correlation:** {emoji} {metrics['pearson']:.4f}\n"
-        result += f"└─ {interpretation}\n\n"
-
-        # Overall assessment (based on cosine as primary)
-        result += "---\n**Overall Assessment:**\n"
-        cosine_emoji, cosine_interpretation = interpret_similarity(metrics['cosine'], 'cosine')
-        result += f"{cosine_emoji} {cosine_interpretation} (Cosine: {metrics['cosine']:.4f})"
-
-        return result
+        pearson = float(np.corrcoef(emb1, emb2)[0, 1])
+
+        return {
+            'cosine': cosine,
+            'euclidean_distance': euclidean_dist,
+            'euclidean_similarity': euclidean_sim,
+            'jaccard': jaccard,
+            'sorensen_dice': sorensen_dice,
+            'manhattan': manhattan,
+            'pearson': pearson
+        }
     except Exception as e:
-        return f"❌ Error: {str(e)}"
+        return {"error": str(e)}
 
 # Create Gradio interface
-with gr.Blocks(title="FF Embeddings API - Qwen3", theme=gr.themes.Soft()) as demo:
+with gr.Blocks(title="FairFate Embeddings API - Qwen3", theme=gr.themes.Soft()) as demo:
     gr.Markdown("""
-    # FF Embeddings API
+    # 🎲 FairFate Embeddings API
 
     **Powered by Qwen3-Embedding-4B** - Advanced Multilingual Embedding Model
 
-    - **100+ Languages** (English, Spanish, French, German, Chinese, Japanese, etc.)
-    - **2560 Native Dimensions** (matryoshka truncation to 1536 for production)
-    - **32K Context** (massive text support)
-    - **Instruction-Aware** (optimized for RPG content)
-    - **Matryoshka Support** (flexible 32-2560 dimensions)
-    - **Optimal for 1536 dims** (60% dimension retention)
+    - 🌍 **100+ Languages** (English, Spanish, French, German, Chinese, Japanese, etc.)
+    - 📏 **2560 Native Dimensions** (matryoshka truncation to 1536 for production)
+    - 📚 **32K Context** (massive text support)
+    - ⚡ **Instruction-Aware** (optimized for RPG content)
+    - 🔬 **Matryoshka Support** (flexible 32-2560 dimensions)
+    - 🏆 **Optimal for 1536 dims** (60% dimension retention)
 
     Perfect for: Product classification, semantic search, recommendations, multilingual matching
     """)
 
-    with gr.Tab("Generate Embeddings"):
+    with gr.Tab("🔮 Generate Embeddings"):
         gr.Markdown("""
         Generate semantic embeddings for product descriptions, titles, or any text.
         Enter one text per line for batch processing.
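The inlined metric block above is plain NumPy; a quick sanity check of the definitions on two hand-picked unit vectors (editor's example, values verifiable by hand):

```python
# Editor's sketch: the same metric definitions on two toy unit vectors.
import numpy as np

emb1 = np.array([0.6, 0.8])  # ||emb1|| == 1
emb2 = np.array([0.8, 0.6])  # ||emb2|| == 1

print(float(np.dot(emb1, emb2)))             # cosine: 0.96
print(float(np.linalg.norm(emb1 - emb2)))    # euclidean distance: ~0.2828
print(float(np.sum(np.abs(emb1 - emb2))))    # manhattan: ~0.4
print(float(np.corrcoef(emb1, emb2)[0, 1]))  # pearson: -1.0 (always ±1 in 2-D)
```

The last line is worth noting: Pearson correlation between two vectors treats their components as paired samples, so it only becomes informative at the real embedding dimensionality, not in this 2-D toy.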
@@ -259,9 +175,9 @@ with gr.Blocks(title="FF Embeddings API - Qwen3", theme=gr.themes.Soft()) as dem
                 submit_btn = gr.Button("Generate Embeddings", variant="primary")
 
             with gr.Column():
-                output_text = gr.Textbox(label="Results", lines=12)
+                output_json = gr.JSON(label="Results")
 
-        submit_btn.click(batch_generate, inputs=[input_text, use_inst, output_dims], outputs=output_text)
+        submit_btn.click(batch_generate, inputs=[input_text, use_inst, output_dims], outputs=output_json)
 
         gr.Examples(
             examples=[
@@ -271,7 +187,7 @@ with gr.Blocks(title="FF Embeddings API - Qwen3", theme=gr.themes.Soft()) as dem
             inputs=[input_text, use_inst, output_dims],
         )
 
-    with gr.Tab("Similarity Calculator"):
+    with gr.Tab("🔍 Similarity Calculator"):
         gr.Markdown("""
         **Comprehensive Similarity Analysis** - Compare two texts using multiple metrics:
 
@@ -301,7 +217,7 @@ with gr.Blocks(title="FF Embeddings API - Qwen3", theme=gr.themes.Soft()) as dem
                 calc_btn = gr.Button("Calculate Similarity", variant="primary")
 
             with gr.Column():
-                similarity_output = gr.Textbox(label="Similarity Result", lines=8)
+                similarity_output = gr.JSON(label="Similarity Result")
 
         calc_btn.click(calculate_similarity, inputs=[text1, text2, use_inst_sim], outputs=similarity_output)
 
@@ -314,9 +230,9 @@ with gr.Blocks(title="FF Embeddings API - Qwen3", theme=gr.themes.Soft()) as dem
             inputs=[text1, text2, use_inst_sim],
         )
 
-    with gr.Tab("API Documentation"):
+    with gr.Tab("📖 API Documentation"):
         gr.Markdown("""
-        ## Quick Start
+        ## 🚀 Quick Start
 
         ### Python
 
@@ -324,7 +240,7 @@ with gr.Blocks(title="FF Embeddings API - Qwen3", theme=gr.themes.Soft()) as dem
        import requests
        import numpy as np
 
-        url = "https://TiniThingsInc-fairfate-embeddings.hf.space/api/predict"
+        url = "https://YOUR_USERNAME-fairfate-embeddings.hf.space/api/predict"
 
        # Generate embeddings
        texts = [
@@ -350,7 +266,7 @@ with gr.Blocks(title="FF Embeddings API - Qwen3", theme=gr.themes.Soft()) as dem
        ### TypeScript/JavaScript
 
        ```typescript
-        const url = 'https://TiniThingsInc-fairfate-embeddings.hf.space/api/predict';
+        const url = 'https://YOUR_USERNAME-fairfate-embeddings.hf.space/api/predict';
 
        const response = await fetch(url, {
            method: 'POST',
@@ -373,7 +289,7 @@ with gr.Blocks(title="FF Embeddings API - Qwen3", theme=gr.themes.Soft()) as dem
 
        ```bash
        curl -X POST \\
-            https://TiniThingsInc-fairfate-embeddings.hf.space/api/predict \\
+            https://YOUR_USERNAME-fairfate-embeddings.hf.space/api/predict \\
            -H "Content-Type: application/json" \\
            -d '{
                "data": [["Your text here"], true, 1536],
@@ -381,7 +297,7 @@ with gr.Blocks(title="FF Embeddings API - Qwen3", theme=gr.themes.Soft()) as dem
            }'
        ```
 
-        ## Parameters
+        ## 📊 Parameters
 
        | Parameter | Type | Default | Description |
        |-----------|------|---------|-------------|
@@ -389,7 +305,7 @@ with gr.Blocks(title="FF Embeddings API - Qwen3", theme=gr.themes.Soft()) as dem
        | `use_instruction` | boolean | true | Add instruction prefix (improves accuracy) |
        | `output_dimensions` | number | 1536 | Output size (32-3584, production default: 1536) |
 
-        ## Use Cases
+        ## 🎯 Use Cases
 
        - **Product Classification**: Auto-tag by genre, system, theme
        - **Semantic Search**: Find by meaning, not keywords
@@ -397,13 +313,20 @@ with gr.Blocks(title="FF Embeddings API - Qwen3", theme=gr.themes.Soft()) as dem
        - **Duplicate Detection**: Find similar listings
        - **Multilingual Matching**: Cross-language similarity
 
-        ## Supported Languages
+        ## ⚡ Performance
+
+        | Batch Size | GPU Throughput | CPU Throughput |
+        |------------|----------------|----------------|
+        | 1 | ~800/sec | ~80/sec |
+        | 32 | ~4000/sec | ~250/sec |
+
+        ## 🌍 Supported Languages
 
        English, Spanish, French, German, Italian, Portuguese, Russian, Polish, Dutch, Czech,
        Chinese, Japanese, Korean, Arabic, Hebrew, Hindi, Thai, Vietnamese, Indonesian,
        Turkish, Swedish, Norwegian, Danish, Finnish, Greek, Romanian, Hungarian, and 80+ more!
 
-        ## Citation
+        ## 📝 Citation
 
        ```bibtex
        @misc{qwen3-embedding-2025,
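The new Performance table states throughput estimates rather than measurements taken in this commit; a crude way to check them locally against the loaded `model` (editor's sketch):

```python
# Editor's sketch: rough texts/sec for one batch size on the current device.
import time

batch = ["A dark fantasy one-shot for 4-6 players"] * 32
model.encode(batch)  # warm-up run so timing excludes lazy initialization

start = time.perf_counter()
model.encode(batch, normalize_embeddings=True)
print(f"{len(batch) / (time.perf_counter() - start):.0f} texts/sec")
```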
@@ -415,7 +338,7 @@ with gr.Blocks(title="FF Embeddings API - Qwen3", theme=gr.themes.Soft()) as dem
        ```
        """)
 
-    with gr.Tab("Model Info"):
+    with gr.Tab("ℹ️ Model Info"):
        gr.Markdown(f"""
        ## Model Details
 
 