Update demo project.py

demo project.py (+14 -10, CHANGED)
@@ -1,6 +1,5 @@
 import gradio as gr
 from langchain_groq import ChatGroq
-import os
 from langgraph.graph import StateGraph, START, END
 from langchain_text_splitters import RecursiveCharacterTextSplitter
 from langchain_chroma import Chroma
@@ -12,6 +11,10 @@ import time
 import os
 from langchain_community.document_loaders import PyPDFLoader
 from langchain_huggingface import HuggingFaceEmbeddings
+from bytez import Bytez
+
+BYTEZ_API_KEY = os.getenv("BYTEZ_API_KEY")
+sdk = Bytez(BYTEZ_API_KEY)
 
 class State(TypedDict):
     query: str
@@ -25,23 +28,24 @@ class checker_class(BaseModel):
     is_relevant: bool = Field(description="Check whether the given query is relevant to the company.")
 
 def invoke_llm(query):
-    llm =
+    llm = sdk.model("openai/gpt-4o-mini")
+    Inp = [{"role": "user", "content": query}]
     try:
-        res = llm.
+        res = llm.run(Inp)
     except:
         time.sleep(60)
         res = llm.invoke(query)
-    return res
+    return res[0]['content']
 
 def invoke_relevance_checker_llm(query):
-    llm =
-
+    llm = sdk.model("openai/gpt-4o-mini")
+    Inp = [{"role": "user", "content": query}]
     try:
-        res =
+        res = llm.run(Inp)
     except:
         time.sleep(60)
-        res =
-    return res
+        res = llm.invoke(query)
+    return res[0]['content']
 
 def safety_checker(state:State):
     llm = ChatGroq(model='meta-llama/llama-guard-4-12b')
@@ -53,7 +57,7 @@ def safety_checker(state:State):
         return {'is_safe':False, 'answer':"<SAFETY CHECKER> That prompt was harmful, please try something else"}
 
 def relevance_checker(state:State):
-    prompt = "You are a lenient relevance-checking assistant. You will be given a user query and a company description. Your job is to decide whether the query is relevant to the company.\n✅ Approve most queries that are even loosely related.\n🚫 Only reject queries that are **clearly unrelated** or have **no connection at all
+    prompt = "You are a lenient relevance-checking assistant. You will be given a user query and a company description. Your job is to decide whether the query is relevant to the company.\n✅ Approve most queries that are even loosely related.\n🚫 Only reject queries that are **clearly unrelated** or have **no connection at all**. Return only 'True' if it is relevant, otherwise 'False'\n\n"
     prompt += f"\nQuery: {state['query']}"
     prompt += f"\nDescription: {state['company_description']}"
    res = invoke_relevance_checker_llm(prompt)
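Note on the two helpers: in the committed version the try branch calls the Bytez model with llm.run(Inp), but the bare except branch still falls back to llm.invoke(query), the Groq-style call the rest of this change moves away from, and the model handle is recreated on every request. The snippet below is a minimal sketch of a consistent retry helper; it assumes the sdk.model(...).run(messages) interface and the res[0]['content'] response shape exactly as they appear in the diff, and the model id "openai/gpt-4o-mini" is simply the one the commit uses.

import os
import time

from bytez import Bytez

# Assumed from the diff: sdk.model(...).run(messages) returns a list of
# message dicts whose first entry carries the reply under 'content'.
BYTEZ_API_KEY = os.getenv("BYTEZ_API_KEY")
sdk = Bytez(BYTEZ_API_KEY)

def invoke_llm(query: str) -> str:
    """Send a single user message, retrying once after a cooldown on failure."""
    llm = sdk.model("openai/gpt-4o-mini")
    messages = [{"role": "user", "content": query}]
    try:
        res = llm.run(messages)
    except Exception:
        # Back off (e.g. on a rate limit) and retry the same Bytez call,
        # rather than falling back to the Groq-style llm.invoke(query).
        time.sleep(60)
        res = llm.run(messages)
    return res[0]["content"]

With a single helper like this, invoke_relevance_checker_llm could simply delegate to invoke_llm, since both functions now target the same model.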