ShadowProtector committed
Commit 969ab0b · 1 Parent(s): 8cc4893

Update app.py

Files changed (1)
  1. app.py +188 -2
app.py CHANGED
@@ -1,6 +1,192 @@
+ %%writefile app.py
+ from IPython.display import Javascript
+ from IPython import display
+ from google.colab import output
+ from base64 import b64decode
+ import datetime
+ import whisper
+ import openai
+ import os
+ import base64
+ from Crypto.Cipher import AES
+ from streamlit_bokeh_events import streamlit_bokeh_events
  import streamlit as st
- import transformers
+ from bokeh.models.widgets import Button
+ from bokeh.models.widgets.buttons import Button
+ from bokeh.models import CustomJS
+ from streamlit_bokeh_events import streamlit_bokeh_events



- st.write("Hello")
+ RECORD = """
+ const sleep = time => new Promise(resolve => setTimeout(resolve, time))
+ const b2text = blob => new Promise(resolve => {
+     const reader = new FileReader()
+     reader.onloadend = e => resolve(e.srcElement.result)
+     reader.readAsDataURL(blob)
+ })
+ var record = time => new Promise(async resolve => {
+     stream = await navigator.mediaDevices.getUserMedia({ audio: true })
+     recorder = new MediaRecorder(stream)
+     chunks = []
+     recorder.ondataavailable = e => chunks.push(e.data)
+     recorder.start()
+     await sleep(time)
+     recorder.onstop = async () => {
+         blob = new Blob(chunks)
+         text = await b2text(blob)
+         resolve(text)
+     }
+     recorder.stop()
+ })
+ """
+
+ openai.api_key = os.environ["API_KEY"]
+
+ with open("encrypt.txt", "r") as encfile:
+     encoder_txt = encfile.read()
+
+ with open("decrypt.txt", "r") as decfile:
+     decoder_txt = decfile.read()
+
+ def openai_fun(myprompt):
+     response_encoded = openai.Completion.create(
+         engine="text-davinci-003",
+         prompt=myprompt,
+         max_tokens=1024,
+         n=1,
+         stop=None,
+         temperature=0.5,
+     )
+     return response_encoded
+
+ def record(sec=5):
+     display.display(Javascript(RECORD))
+     s = output.eval_js('record(%d)' % (sec * 1000))
+     b = b64decode(s.split(',')[1])
+     ts = datetime.datetime.now()
+     filename = ts.strftime("%Y_%m_%d_%H_%M_%S")
+     with open(f'{filename}.wav', 'wb') as f:
+         f.write(b)
+     return f'{filename}.wav'  # or webm ?
+
+ model = whisper.load_model("base")
+ transcribed = []
+
+ while True:
+     user_choice = st.text_input("Do you want to record a new audio for transcription? [y/n]")
+
+     if user_choice == 'y':
+         st.write('Recording! (5 seconds)')
+         record(5)
+         folder_path = "/content"
+         audio_files = [f for f in os.listdir(folder_path) if f.endswith(".wav")]
+
+         audio_files.sort(key=lambda x: os.path.getmtime(os.path.join(folder_path, x)), reverse=True)
+         last_audio_file_path = os.path.join(folder_path, audio_files[0])
+         st.write('Transcribing audio file: ', last_audio_file_path)
+         # COMMENT IF NOT NEEDED:
+         if os.path.exists(last_audio_file_path) and last_audio_file_path not in transcribed:
+             audio = whisper.load_audio(last_audio_file_path)
+             audio = whisper.pad_or_trim(audio)
+             mel = whisper.log_mel_spectrogram(audio).to(model.device)
+             options = whisper.DecodingOptions(language='en', fp16=False)
+
+             result = whisper.decode(model, mel, options)
+
+             if result.no_speech_prob < 0.5:
+                 mymsg = result.text
+                 st.write("Actual Message: ", mymsg)
+
+                 enc_prompt = encoder_txt + mymsg
+
+                 openai_fun(enc_prompt)
+
+                 if openai_fun(enc_prompt)['choices'][0]['text'] != "":
+                     # print(response_encoded['choices'][0]['text'])
+                     exec(openai_fun(enc_prompt)['choices'][0]['text'])
+                     encoded_msg = enc(mymsg)
+                     print("The encoded message: ", encoded_msg)
+
+                     decode_ = st.text_input("Do you wish to decode the message? [y/n]")
+                     if decode_ == "y":
+                         dec_prompt = decoder_txt + str(encoded_msg)
+                         response_decoded = openai.Completion.create(
+                             engine="text-davinci-003",
+                             prompt=dec_prompt,
+                             max_tokens=500,
+                             n=1,
+                             stop=None,
+                             temperature=0.5,
+                         )
+
+                         if response_decoded['choices'][0]['text'] != "":
+                             print(response_decoded['choices'][0]['text'])
+                             exec(response_decoded['choices'][0]['text'])
+                             decoded_msg = dec(encoded_msg, key)
+                             print("The decoded message: ", decoded_msg)
+
+             else:
+                 st.write('Retry! The message could')
+
+         break  # exit the loop
+
+     elif user_choice == 'n':
+         uc1 = input('Do you want to transcribe an existing audio? [y/n]')
+
+         if uc1 == 'y':
+             folder_path = "/content"
+             audio_files = [f for f in os.listdir(folder_path) if f.endswith(".wav")]
+             print('Audio files present: ', audio_files)
+
+             audio_files.sort(key=lambda x: os.path.getmtime(os.path.join(folder_path, x)), reverse=True)
+             last_audio_file_path = os.path.join(folder_path, audio_files[0])
+             print('Transcribing last audio file: ', last_audio_file_path)
+
+             # COMMENT IF NOT NEEDED:
+             if os.path.exists(last_audio_file_path) and last_audio_file_path not in transcribed:
+                 audio = whisper.load_audio(last_audio_file_path)
+                 audio = whisper.pad_or_trim(audio)
+                 mel = whisper.log_mel_spectrogram(audio).to(model.device)
+                 options = whisper.DecodingOptions(language='en', fp16=False)
+
+                 result = whisper.decode(model, mel, options)
+
+                 if result.no_speech_prob < 0.5:
+                     mymsg = result.text
+                     print("Actual Message: ", mymsg)
+                     enc_prompt = encoder_txt + result.text
+                     response_encoded = openai.Completion.create(
+                         engine="text-davinci-003",
+                         prompt=enc_prompt,
+                         max_tokens=1024,
+                         n=1,
+                         stop=None,
+                         temperature=0.5,
+                     )
+
+                     if response_encoded['choices'][0]['text'] != "":
+                         # print(response_encoded['choices'][0]['text'])
+                         exec(response_encoded['choices'][0]['text'])
+                         encoded_msg = enc(mymsg)
+                         st.write("The encoded message: ", encoded_msg)
+
+                 else:
+                     st.write('Retry! The message could')
+
+             # DELETE audio
+
+             break  # exit the loop
+
+         elif uc1 == 'n':
+             continue  # continue the loop, prompting for input again
+
+         else:
+             st.write('Invalid input, please enter y or n')
+             continue  # continue the loop, prompting for input again
+
+     else:
+         st.write('Invalid input, please enter y or n')
+         continue  # continue the loop, prompting for input again