Spaces:
Running
Running
admin
committed on
Commit
·
16df88a
1
Parent(s):
8e0663f
combine all tools
Browse files
.gitignore
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
*__pycache__*
|
2 |
+
test.*
|
README.md
CHANGED
@@ -1,12 +1,12 @@
|
|
1 |
---
|
2 |
-
title:
|
3 |
emoji: 🈂️🆒🈳
|
4 |
colorFrom: green
|
5 |
colorTo: indigo
|
6 |
sdk: gradio
|
7 |
-
sdk_version: 5.
|
8 |
app_file: app.py
|
9 |
pinned: false
|
10 |
license: apache-2.0
|
11 |
-
short_description:
|
12 |
---
|
|
|
1 |
---
|
2 |
+
title: Online tools
|
3 |
emoji: 🈂️🆒🈳
|
4 |
colorFrom: green
|
5 |
colorTo: indigo
|
6 |
sdk: gradio
|
7 |
+
sdk_version: 5.22.0
|
8 |
app_file: app.py
|
9 |
pinned: false
|
10 |
license: apache-2.0
|
11 |
+
short_description: Online tools collection
|
12 |
---
|
app.py
CHANGED
@@ -1,54 +1,42 @@
|
|
1 |
-
import os
|
2 |
-
import json
|
3 |
-
import requests
|
4 |
import gradio as gr
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
5 |
|
|
|
|
|
|
|
|
|
|
|
6 |
|
7 |
-
|
8 |
-
|
9 |
-
return "Please enter valid text and select the mode!"
|
10 |
|
11 |
-
|
12 |
-
|
13 |
-
"source": source,
|
14 |
-
"trans_type": direction,
|
15 |
-
"request_id": "demo",
|
16 |
-
"detect": True,
|
17 |
-
}
|
18 |
-
headers = {
|
19 |
-
"content-type": "application/json",
|
20 |
-
"x-authorization": f"token {os.getenv('apikey_caiyun')}",
|
21 |
-
}
|
22 |
-
try:
|
23 |
-
response = requests.request(
|
24 |
-
"POST",
|
25 |
-
os.getenv("api_caiyun"),
|
26 |
-
data=json.dumps(payload),
|
27 |
-
headers=headers,
|
28 |
-
)
|
29 |
|
30 |
-
|
|
|
31 |
|
32 |
-
|
33 |
-
|
34 |
|
|
|
|
|
35 |
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
],
|
47 |
-
outputs=gr.TextArea(label="Translation results", show_copy_button=True),
|
48 |
-
flagging_mode="never",
|
49 |
-
examples=[
|
50 |
-
["彩云小译は最高の翻訳サービスです", "auto2en"],
|
51 |
-
["Lingocloud is the best translation service.", "auto2zh"],
|
52 |
-
],
|
53 |
-
cache_examples=False,
|
54 |
-
).launch()
|
|
|
|
|
|
|
|
|
1 |
import gradio as gr
|
2 |
+
from data import data_converter
|
3 |
+
from exif import clexif
|
4 |
+
from gif import video2gif
|
5 |
+
from github import github_release_creator
|
6 |
+
from qr import qrcode
|
7 |
+
from rct import rct_generator
|
8 |
+
from smtp import smtp_tester
|
9 |
+
from trans import translator
|
10 |
+
from url import url_shortner
|
11 |
|
12 |
+
if __name__ == "__main__":
    # Top-level app shell: one tab per tool; each tool module builds its own
    # widgets inside the active tab context.
    with gr.Blocks() as demo:
        gr.Markdown("# Online Tools Collection")
        with gr.Tab("Data Converter"):
            data_converter()

        with gr.Tab("Image EXIF Cleaner"):
            clexif()

        with gr.Tab("Video to GIF"):
            video2gif()

        with gr.Tab("GitHub Releaser"):
            github_release_creator()

        with gr.Tab("QR Code"):
            qrcode()

        with gr.Tab("RCT Generator"):
            rct_generator()

        with gr.Tab("SMTP Test"):
            smtp_tester()

        with gr.Tab("Translator"):
            translator()

        with gr.Tab("URL Shortner"):
            url_shortner()

    demo.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
config.py
ADDED
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os

# Shared scratch directory for generated files. Reusing __pycache__ keeps it
# covered by the existing .gitignore pattern.
TMP_DIR = "./__pycache__"

# Tab labels for the data-converter tool; "⇆" separates the two formats.
TAB_CONFIG = ["jsonl ⇆ csv", "json ⇆ csv", "json ⇆ jsonl"]
# Currently selected conversion direction (mutated at runtime by data.change_mode).
MODE = {"from": "jsonl", "to": "csv"}
# Browser-like User-Agent for outbound HTTP requests.
HEADER = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36 Edg/132.0.0.0",
}

# Service endpoints / credentials come from environment variables (Space secrets).
API_SMTP = os.getenv("api_smtp")  # SMTP test relay endpoint
API_TRANS = os.getenv("api_caiyun")  # Caiyun translation API URL
KEY_TRANS = os.getenv("apikey_caiyun")  # Caiyun API token
|
data.py
ADDED
@@ -0,0 +1,165 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import csv
|
3 |
+
import json
|
4 |
+
import shutil
|
5 |
+
import gradio as gr
|
6 |
+
import pandas as pd
|
7 |
+
from config import TMP_DIR, TAB_CONFIG, MODE
|
8 |
+
|
9 |
+
|
10 |
+
def clean_cache(dir_path=TMP_DIR):
    """Ensure *dir_path* exists and is empty (drop any previous contents)."""
    if os.path.exists(dir_path):
        shutil.rmtree(dir_path)

    # After the rmtree above the directory never exists, so recreate it.
    os.makedirs(dir_path)
|
16 |
+
|
17 |
+
|
18 |
+
def encoder_json(file_path: str):
    """Load a JSON file and return its top-level array as a list of records."""
    with open(file_path, "r", encoding="utf-8") as fin:
        return list(json.load(fin))
|
23 |
+
|
24 |
+
|
25 |
+
def encoder_jsonl(file_path: str):
    """Parse a JSON-Lines file: one JSON object per line, returned as a list."""
    with open(file_path, "r", encoding="utf-8") as fin:
        return [json.loads(raw.strip()) for raw in fin]
|
33 |
+
|
34 |
+
|
35 |
+
def encoder_csv(file_path: str):
    """Read a CSV file into a list of dicts keyed by the header row."""
    with open(file_path, "r", encoding="utf-8") as fin:
        return [dict(record) for record in csv.DictReader(fin)]
|
43 |
+
|
44 |
+
|
45 |
+
def decoder_json(data_list: list, file_path=f"{TMP_DIR}/output.json"):
    """Dump records to a pretty-printed JSON array.

    NOTE: when *data_list* is empty no file is written, but the path is
    still returned (matches the other decoders' contract).
    """
    if data_list:
        serialized = json.dumps(data_list, ensure_ascii=False, indent=4)
        with open(file_path, "w", encoding="utf-8") as fout:
            fout.write(serialized)

    return file_path
|
51 |
+
|
52 |
+
|
53 |
+
def decoder_csv(data_list: list, file_path=f"{TMP_DIR}/output.csv"):
    """Write records to CSV; column order follows the first record's keys."""
    if data_list:
        columns = list(data_list[0])
        with open(file_path, "w", newline="", encoding="utf-8") as fout:
            writer = csv.writer(fout)
            writer.writerow(columns)
            writer.writerows(
                [record[col] for col in columns] for record in data_list
            )

    return file_path
|
63 |
+
|
64 |
+
|
65 |
+
def decoder_jsonl(data_list: list, file_path=f"{TMP_DIR}/output.jsonl"):
    """Write records as JSON Lines: one compact JSON object per line."""
    if data_list:
        lines = [json.dumps(record, ensure_ascii=False) for record in data_list]
        with open(file_path, "w", encoding="utf-8") as fout:
            fout.write("\n".join(lines) + "\n")

    return file_path
|
73 |
+
|
74 |
+
|
75 |
+
def change_mode(input: str):
    """Update the global MODE from a choice string like 'json → csv'.

    The arrow direction decides which side is the source format.
    (Parameter name shadows the builtin; kept for interface compatibility.)
    """
    left, arrow, right = input.split(" ")[:3]
    forward = arrow == "→"
    MODE["from"] = left if forward else right
    MODE["to"] = right if forward else left
|
84 |
+
|
85 |
+
|
86 |
+
def infer(input_file: str):
    """Convert the uploaded file according to the globally selected MODE.

    Returns (output file path, preview DataFrame); on failure returns
    (None, a one-row DataFrame describing the error).

    Fix: the original built function names with eval(f'encoder_{...}').
    Explicit dict dispatch avoids eval (a code-injection foot-gun and an
    idiom violation) and fails fast with KeyError if MODE is corrupted.
    """
    clean_cache()
    encoders = {"json": encoder_json, "jsonl": encoder_jsonl, "csv": encoder_csv}
    decoders = {"json": decoder_json, "jsonl": decoder_jsonl, "csv": decoder_csv}
    try:
        data_list = encoders[MODE["from"]](input_file)
        output_file = decoders[MODE["to"]](data_list)
        return output_file, pd.DataFrame(data_list)

    except Exception as e:
        return None, pd.DataFrame([{"Please upload a standard data file": f"{e}"}])
|
95 |
+
|
96 |
+
|
97 |
+
def data_converter():
    """Build the data-converter UI: one tab per format pair in TAB_CONFIG.

    Each tab has a direction dropdown, a file input, and a convert button;
    conversion state lives in the shared MODE dict (see change_mode).
    """
    with gr.Blocks() as data:
        for item in TAB_CONFIG:
            # e.g. "json ⇆ csv" -> ["json", "csv"]
            types = item.split(" ⇆ ")
            with gr.Tab(item) as tab:
                with gr.Row():
                    with gr.Column():
                        option = gr.Dropdown(
                            choices=[
                                f"{types[0]} → {types[1]}",
                                f"{types[0]} ← {types[1]}",
                            ],
                            label="Mode",
                            value=f"{types[0]} → {types[1]}",
                        )
                        input_file = gr.components.File(
                            type="filepath",
                            label="Upload input file",
                            file_types=[f".{types[0]}", f".{types[1]}"],
                        )
                        convert_btn = gr.Button("Convert")

                    with gr.Column():
                        output_file = gr.components.File(
                            type="filepath", label="Download output file"
                        )
                        data_viewer = gr.Dataframe(label="Data viewer")

                # Keep the global MODE in sync both when the dropdown changes
                # and when the user switches tabs.
                option.change(change_mode, inputs=option)
                tab.select(change_mode, inputs=option)
                convert_btn.click(
                    infer, inputs=input_file, outputs=[output_file, data_viewer]
                )

        gr.Markdown(
            """
            ## Supported JSON format
            ```
            [
                {
                    "key1": "val11",
                    "key2": "val12",
                    ...
                },
                {
                    "key1": "val21",
                    "key2": "val22",
                    ...
                },
                ...
            ]
            ```
            ## Supported jsonl format
            ```
            {"key1": "val11", "key2": "val12", ...}
            {"key1": "val21", "key2": "val22", ...}
            ...
            ```
            ## Supported CSV format
            ```
            key1, key2, ...
            val11, val12, ...
            val21, val22, ...
            ...
            ```
            """
        )

    return data
|
exif.py
ADDED
@@ -0,0 +1,183 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import imghdr
|
3 |
+
import shutil
|
4 |
+
import hashlib
|
5 |
+
import zipfile
|
6 |
+
import exifread
|
7 |
+
import gradio as gr
|
8 |
+
import pandas as pd
|
9 |
+
from PIL import Image
|
10 |
+
from config import TMP_DIR
|
11 |
+
|
12 |
+
|
13 |
+
def get_exif_data(origin_file_path):
    """Return every EXIF tag of an image as newline-separated 'key:value' text."""
    with open(origin_file_path, "rb") as stream:
        tags = exifread.process_file(stream)

    return "".join(f"{name}:{tags[name]}\n" for name in tags)
|
23 |
+
|
24 |
+
|
25 |
+
def clear_exif_data(image_path: str, img_mode=None, outdir=""):
    """Rewrite an image pixel-by-pixel so no EXIF metadata survives.

    img_mode: when given (e.g. "RGB"), re-encode as JPEG under an md5-based
    filename (avoids collisions in batch mode); when None, keep the source
    image's own mode and an 'output.<ext>' filename.
    Returns the intended save path even when saving failed (error printed).
    """
    # Default target keeps the original file extension.
    save_path = f"{TMP_DIR}/{outdir}output." + image_path.split(".")[-1]
    try:
        img = Image.open(image_path)
        # Copying raw pixel data into a brand-new image drops all metadata.
        data = list(img.getdata())
        if img_mode:
            save_path = (
                f"{TMP_DIR}/{outdir}{hashlib.md5(image_path.encode()).hexdigest()}.jpg"
            )
        else:
            img_mode = img.mode

        img_without_exif = Image.new(img_mode, img.size)
        img_without_exif.putdata(data)
        img_without_exif.save(save_path)

    except Exception as e:
        # Best-effort: log and still return the (possibly never-written) path.
        print(f"\n{image_path} Error: {e}")

    return save_path
|
45 |
+
|
46 |
+
|
47 |
+
def unzip_file(zip_path: str, extract_to=f"{TMP_DIR}/inputs"):
    """Extract *zip_path* into *extract_to* (created if missing); return the dir."""
    if not os.path.exists(extract_to):
        os.makedirs(extract_to)

    # Unpack the whole archive into the target directory.
    with zipfile.ZipFile(zip_path, "r") as archive:
        archive.extractall(extract_to)

    return extract_to
|
57 |
+
|
58 |
+
|
59 |
+
def find_files(directory: str):
    """Recursively collect paths (forward-slash separated) of files that
    imghdr recognizes as images.

    Fix: compare against None with `is not None` (identity), per PEP 8,
    instead of `!= None`.
    NOTE(review): imghdr is deprecated since Python 3.11 and removed in
    3.13 (PEP 594); consider Pillow-based detection when upgrading.
    """
    found_files = []
    for root, _, files in os.walk(directory):
        for file in files:
            fpath = os.path.join(root, file).replace("\\", "/")
            if imghdr.what(fpath) is not None:
                found_files.append(fpath)

    return found_files
|
68 |
+
|
69 |
+
|
70 |
+
def compress(folder_path=f"{TMP_DIR}/outputs", zip_file_path=f"{TMP_DIR}/outputs.zip"):
    """Zip *folder_path* (entries stored under its basename) and return the
    archive path; returns None (implicitly) when the folder is missing."""
    if not os.path.exists(folder_path):
        print(f"Error: Folder '{folder_path}' does not exist.")
        return

    base = os.path.basename(folder_path)
    with zipfile.ZipFile(zip_file_path, "w", zipfile.ZIP_DEFLATED) as archive:
        for root, _, names in os.walk(folder_path):
            for name in names:
                full = os.path.join(root, name)
                rel = os.path.relpath(full, folder_path)
                archive.write(full, arcname=os.path.join(base, rel))

    return zip_file_path
|
86 |
+
|
87 |
+
|
88 |
+
def infer(image_path: str, original_ext: bool):
    """Clean a single image and return (cleaned image path, EXIF dump text).

    original_ext=False forces RGB/JPEG output; True keeps the source format.
    Fix: compare against None with `is` (identity), per PEP 8, not `==`.
    """
    if not image_path or imghdr.what(image_path) is None:
        return None, "Please input a picture!"

    # Fresh scratch directory for every request.
    if os.path.exists(TMP_DIR):
        shutil.rmtree(TMP_DIR)

    os.makedirs(TMP_DIR, exist_ok=True)
    return clear_exif_data(
        image_path, img_mode="RGB" if not original_ext else None
    ), get_exif_data(image_path)
|
99 |
+
|
100 |
+
|
101 |
+
def batch_infer(imgs_zip: str, original_ext: bool):
    """Strip EXIF from every image inside an uploaded zip.

    Returns (path to zip of cleaned images, DataFrame of per-file EXIF dumps).
    """
    if not imgs_zip:
        return None, pd.DataFrame([{"Warning": "Please upload pictures zip!"}])

    # Start from a clean scratch dir so stale outputs never leak between runs.
    if os.path.exists(TMP_DIR):
        shutil.rmtree(TMP_DIR)

    os.makedirs(f"{TMP_DIR}/outputs", exist_ok=True)
    extract_to = unzip_file(imgs_zip)
    imgs = find_files(extract_to)
    # None => keep each image's own mode/extension; "RGB" => force JPEG output.
    mode = "RGB" if not original_ext else None
    exifs = []
    for img in imgs:
        clear_exif_data(img, img_mode=mode, outdir="outputs/")
        exifs.append({"filename": os.path.basename(img), "exif": get_exif_data(img)})

    if not exifs:
        exifs = [{"Warning": "No picture in the zip"}]

    return compress(), pd.DataFrame(exifs)
|
121 |
+
|
122 |
+
|
123 |
+
def clexif():
    """Build the EXIF-cleaner UI: one tab for single images, one for zips.

    Fix: both gr.Interface calls used `allow_flagging="never"`, the Gradio 4
    kwarg name. The Space pins sdk_version 5.22.0, where it was renamed to
    `flagging_mode` (the Gradio-4 name raises a TypeError), and every other
    tool in this app already uses `flagging_mode`.
    """
    with gr.Blocks() as iface:
        with gr.Tab("Process single picture"):
            gr.Interface(
                fn=infer,
                inputs=[
                    gr.File(
                        label="Upload picture",
                        file_types=[
                            ".jpg",
                            ".jpeg",
                            ".tiff",
                            ".cr2",
                            ".nef",
                            ".orf",
                            ".sr2",
                            ".heic",
                            ".avif",
                        ],
                    ),
                    gr.Checkbox(
                        label="Export original format",
                        value=False,
                    ),
                ],
                outputs=[
                    gr.Image(
                        label="Download cleaned picture",
                        type="filepath",
                        show_share_button=False,
                    ),
                    gr.Textbox(label="EXIF", show_copy_button=True),
                ],
                flagging_mode="never",
            )

        with gr.Tab("Batch processor"):
            gr.Interface(
                fn=batch_infer,
                inputs=[
                    gr.File(
                        label="Upload pictures zip",
                        file_types=[".zip"],
                    ),
                    gr.Checkbox(
                        label="Export original format",
                        value=False,
                    ),
                ],
                outputs=[
                    gr.File(
                        label="Download cleaned pictures",
                        type="filepath",
                    ),
                    gr.Dataframe(label="EXIF list"),
                ],
                description="When uploading pictures zip, please make sure the zip is completely uploaded before clicking Submit",
                flagging_mode="never",
            )

    return iface
|
gif.py
ADDED
@@ -0,0 +1,93 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import math
|
3 |
+
import shutil
|
4 |
+
import gradio as gr
|
5 |
+
from PIL import Image, ImageSequence
|
6 |
+
from moviepy.editor import VideoFileClip
|
7 |
+
from config import TMP_DIR
|
8 |
+
|
9 |
+
|
10 |
+
def clean_cache(tmp_dir=TMP_DIR):
    """Reset the scratch directory and return the canonical input-GIF path.

    Fix: use os.makedirs instead of os.mkdir so a missing parent directory
    cannot make the recreation fail with FileNotFoundError.
    """
    if os.path.exists(tmp_dir):
        shutil.rmtree(tmp_dir)

    os.makedirs(tmp_dir)
    return f"{tmp_dir}/input.gif"
|
16 |
+
|
17 |
+
|
18 |
+
def get_frame_duration(gif: Image):
    """Per-frame display times in ms; frames missing the field inherit the
    GIF-level duration, which itself defaults to 100 ms."""
    fallback = gif.info.get("duration", 100)
    durations = []
    for frame in ImageSequence.Iterator(gif):
        durations.append(frame.info.get("duration", fallback))

    return durations
|
23 |
+
|
24 |
+
|
25 |
+
def resize_gif(
    target_width: int,
    target_height: int,
    input_path=f"{TMP_DIR}/input.gif",
    output_path=f"{TMP_DIR}/output.gif",
):
    """Rescale every frame of *input_path* to the target size, preserving the
    original per-frame timing, and write the result to *output_path*."""
    source = Image.open(input_path)
    # LANCZOS gives the best downscale quality for photographic frames.
    scaled = [
        frame.resize((target_width, target_height), Image.LANCZOS)
        for frame in ImageSequence.Iterator(source)
    ]

    first, *rest = scaled
    first.save(
        output_path,
        format="GIF",
        append_images=rest,
        save_all=True,
        duration=get_frame_duration(source),
        loop=0,
    )

    return output_path
|
53 |
+
|
54 |
+
|
55 |
+
def infer(video_path: str, speed: float):
    """Convert a short (<= 5 s) video into a 640px-wide GIF at the given speed.

    Returns (original filename, gif path) on success, (error message, None)
    on failure.
    """
    target_w = 640
    gif_path = clean_cache()
    try:
        # NOTE(review): audio_fps is forwarded to moviepy's reader; its effect
        # on GIF output is unclear — confirm whether it is needed.
        with VideoFileClip(video_path, audio_fps=16000) as clip:
            if clip.duration > 5:
                raise ValueError("The uploaded video is too long")

            # clip.write_gif(gif_path, fps=12, progress_bar=True)
            # speedx/to_gif are moviepy 1.x APIs (requirements pins moviepy==1.0.3).
            clip.speedx(speed).to_gif(gif_path, fps=12)
            w, h = clip.size

        # Preserve aspect ratio at the fixed 640px target width.
        target_h = math.ceil(target_w * h / w)
        return os.path.basename(video_path), resize_gif(target_w, target_h)

    except Exception as e:
        return f"{e}", None
|
72 |
+
|
73 |
+
|
74 |
+
def video2gif():
    """Build the Gradio interface for the video-to-GIF tool."""
    return gr.Interface(
        fn=infer,
        inputs=[
            gr.Video(label="Upload video"),
            gr.Slider(
                label="Speed",
                minimum=0.5,
                maximum=2.0,
                step=0.25,
                value=1.0,
            ),
        ],
        outputs=[
            gr.Textbox(label="Filename", show_copy_button=True),
            gr.Image(label="Download GIF", type="filepath", show_share_button=False),
        ],
        description="Please make sure the video is completely uploaded before clicking Submit, you can crop it online first if the video size is >5s",
        flagging_mode="never",
    )
|
github.py
ADDED
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import requests
|
3 |
+
import gradio as gr
|
4 |
+
|
5 |
+
|
6 |
+
def create_github_release(
    owner: str,
    repo: str,
    token: str,
    release_tag: str,
    release_name: str,
    release_description: str,
    files: list,
):
    """Create a GitHub release and upload the given files as release assets.

    Returns a human-readable status string (per-asset results or an error).
    """
    try:
        # Create the release via the REST API.
        release_response = requests.post(
            f"https://api.github.com/repos/{owner}/{repo}/releases",
            headers={
                "Authorization": f"token {token}",
                "Accept": "application/vnd.github.v3+json",
            },
            json={
                "tag_name": release_tag,
                "name": release_name,
                "body": release_description,
                "draft": False,
                "prerelease": False,
            },
        )

        # 201 Created is the only success status for this endpoint.
        if release_response.status_code != 201:
            return f"Failed to create release: {release_response.status_code}, {release_response.json()}"

        # Extract the asset upload URL (strip the {?name,label} template suffix).
        release = release_response.json()
        upload_url = release["upload_url"].split("{")[0]

        # Upload each binary file as a release asset.
        results = []
        for file_path in files:
            file_name = os.path.basename(file_path)
            with open(file_path, "rb") as binary_file:
                upload_response = requests.post(
                    f"{upload_url}?name={file_name}",
                    headers={
                        "Authorization": f"token {token}",
                        "Content-Type": "application/octet-stream",
                    },
                    data=binary_file,
                )

            if upload_response.status_code == 201:
                results.append(f"Binary file '{file_name}' uploaded successfully!")
            else:
                results.append(
                    f"Failed to upload binary file '{file_name}': {upload_response.status_code}, {upload_response.json()}"
                )

        return "\n".join(results)

    except Exception as e:
        return f"Release failed: {e}"
|
64 |
+
|
65 |
+
|
66 |
+
def github_release_creator():
    """Build the Gradio interface for the GitHub release tool."""
    return gr.Interface(
        fn=create_github_release,
        inputs=[
            gr.Textbox(
                label="GitHub Owner",
                placeholder="username / organization name",
            ),
            gr.Textbox(label="GitHub Repo", placeholder="repo name"),
            gr.Textbox(
                label="GitHub Token",
                placeholder="personal access token",
                type="password",
            ),
            gr.Textbox(label="Release Tag", placeholder="v1.0.0"),
            gr.Textbox(label="Release Name", placeholder="My New Release"),
            gr.TextArea(
                label="Describe this release",
                placeholder="Release with binary file(s) and source code.",
            ),
            gr.File(label="Binary File(s)", file_count="multiple"),
        ],
        outputs=gr.TextArea(label="Status", show_copy_button=True),
        description="Upload binary file(s) to create a new GitHub release.",
        flagging_mode="never",
    )
|
qr.py
ADDED
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
|
3 |
+
|
4 |
+
def infer(img_size: int, input_txt: str):
    """Build a QR-code image URL for *input_txt* (api.qrserver.com).

    Returns None when there is no text.
    Fix: the text is now percent-encoded before being interpolated into the
    query string — previously characters like '&', '#', '=' or spaces in the
    user's text corrupted the request and produced a wrong QR code.
    """
    from urllib.parse import quote  # local import keeps module-level deps unchanged

    if not input_txt:
        return None

    encoded = quote(input_txt, safe="")
    return f"https://api.qrserver.com/v1/create-qr-code/?size={img_size}x{img_size}&data={encoded}"
|
9 |
+
|
10 |
+
|
11 |
+
def qrcode():
    """Build the Gradio interface for the QR-code generator."""
    return gr.Interface(
        fn=infer,
        inputs=[
            gr.Slider(35, 1000, 217, label="Image size"),
            gr.Textbox(label="Input text"),
        ],
        outputs=gr.Image(label="QR code", show_share_button=False),
        description="Enter text to generate a QR code.",
        flagging_mode="never",
    )
|
rct.py
ADDED
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import csv
|
3 |
+
import random
|
4 |
+
import shutil
|
5 |
+
import pandas as pd
|
6 |
+
import gradio as gr
|
7 |
+
from config import TMP_DIR
|
8 |
+
|
9 |
+
|
10 |
+
def list_to_csv(list_of_dicts: list, filename: str):
    """Write a list of homogeneous dicts to *filename* as CSV; return the path.

    Field order follows the first record's keys.
    """
    fieldnames = dict(list_of_dicts[0]).keys()
    with open(filename, "w", newline="", encoding="utf-8") as fout:
        writer = csv.DictWriter(fout, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(list_of_dicts)

    return filename
|
19 |
+
|
20 |
+
|
21 |
+
def random_allocation(participants: int, ratio: list):
    """Randomly assign participant ids 1..N to groups sized by *ratio*.

    Returns (csv file path, DataFrame sorted by id). The final group absorbs
    any rounding remainder so every participant is assigned.
    """
    total = sum(ratio)
    # Cumulative split indices into the shuffled id list; group i+1 spans
    # partist[splits[i]:splits[i+1]].
    splits = [0]
    for i, r in enumerate(ratio):
        splits.append(splits[i] + int(1.0 * r / total * participants))

    # Force the final boundary to N so int() truncation never drops anyone.
    splits[-1] = participants
    partist = list(range(1, participants + 1))
    random.shuffle(partist)
    allocation = []
    groups = len(ratio)
    for i in range(groups):
        start = splits[i]
        end = splits[i + 1]
        for participant in partist[start:end]:
            allocation.append({"id": participant, "group": i + 1})

    sorted_data = sorted(allocation, key=lambda x: x["id"])
    filename = list_to_csv(sorted_data, f"{TMP_DIR}/output.csv")
    return filename, pd.DataFrame(sorted_data)
|
41 |
+
|
42 |
+
|
43 |
+
def infer(participants: float, ratios: str):
    """Parse UI inputs and delegate to random_allocation.

    Fix: an unparsable or all-non-positive *ratios* string used to leave the
    ratio list empty, which crashed random_allocation (sum(ratio) == 0 leads
    to a ZeroDivisionError / IndexError); fall back to a single group instead.
    """
    # Fresh scratch directory for the output CSV.
    if os.path.exists(TMP_DIR):
        shutil.rmtree(TMP_DIR)

    os.makedirs(TMP_DIR, exist_ok=True)
    ratio = []
    try:
        # Accept "8:1:1"-style strings; ignore non-positive entries.
        for part in ratios.split(":"):
            value = float(part.strip())
            if value > 0:
                ratio.append(value)

    except Exception:
        print("Invalid input of ratio!")

    if not ratio:
        # Degenerate but safe: put everyone into one group.
        ratio = [1.0]

    return random_allocation(int(participants), ratio)
|
60 |
+
|
61 |
+
|
62 |
+
def rct_generator():
    """Build the Gradio interface for the randomized-controlled-trial tool."""
    return gr.Interface(
        fn=infer,
        inputs=[
            gr.Number(label="Number of participants", value=10),
            gr.Textbox(label="Grouping ratio", value="8:1:1"),
        ],
        outputs=[
            gr.File(label="Download data CSV"),
            gr.Dataframe(label="Data preview"),
        ],
        description="Enter the number of participants and the grouping ratio in the format of numbers separated by : to generate randomized controlled trial.",
        flagging_mode="never",
    )
|
requirements.txt
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
tqdm
|
2 |
+
Pillow
|
3 |
+
exifread
|
4 |
+
requests
|
5 |
+
moviepy==1.0.3
|
smtp.py
ADDED
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import requests
|
2 |
+
import gradio as gr
|
3 |
+
from config import API_SMTP
|
4 |
+
|
5 |
+
|
6 |
+
def send_email(
    target: str,
    title: str,
    content: str,
    name: str,
    email: str,
    password: str,
    host: str,
    port: int,
):
    """Send a test mail through the external SMTP relay endpoint (API_SMTP).

    Returns the relay's reported status string on HTTP 200, otherwise an
    error message with the status code.
    """
    response = requests.get(
        API_SMTP,
        params={
            "host": host,
            # NOTE(review): capitalized "Port" key looks intentional for the
            # relay's query schema — confirm against the relay API.
            "Port": port,
            "key": password,  # apikey
            "email": email,  # from
            "mail": target,  # to
            "title": title,  # subject
            "name": name,  # nickname
            "text": content,  # content
        },
    )
    if response.status_code == 200:
        result: dict = response.json()
        return result.get("status")

    else:
        return f"Request failed with status code: {response.status_code}"
|
35 |
+
|
36 |
+
|
37 |
+
def smtp_tester():
    """Build the Gradio interface for the SMTP test tool."""
    return gr.Interface(
        fn=send_email,
        inputs=[
            gr.Textbox(label="To email"),
            gr.Textbox(label="Title", value="Test title"),
            gr.TextArea(label="Content", value="Test content"),
            gr.Textbox(label="Sender name", value="Test nickname"),
            gr.Textbox(label="From email"),
            gr.Textbox(label="API key"),
            gr.Textbox(label="SMTP host", value="smtp.163.com"),
            gr.Slider(label="Port", minimum=0, maximum=65535, step=1, value=25),
        ],
        outputs=gr.Textbox(label="status", show_copy_button=True),
        description="SMTP Online Test Tool",
        flagging_mode="never",
    )
|
trans.py
ADDED
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import json
|
3 |
+
import requests
|
4 |
+
import gradio as gr
|
5 |
+
from config import API_TRANS, KEY_TRANS
|
6 |
+
|
7 |
+
|
8 |
+
def translate(source, direction):
    """Translate *source* via the Caiyun/Lingocloud API.

    direction: e.g. "auto2en", "auto2zh". Returns the translated text, or an
    error-message string on any failure.

    Fix: use the idiomatic requests.post(..., json=payload) instead of
    requests.request("POST", ..., data=json.dumps(payload)), and
    response.json() instead of json.loads(response.text).
    """
    if not source or not direction:
        return "Please enter valid text and select the mode!"

    # WARNING, this token is a test token for new developers, and it should be replaced by your token
    payload = {
        "source": source,
        "trans_type": direction,
        "request_id": "demo",
        "detect": True,
    }
    headers = {
        "content-type": "application/json",
        "x-authorization": f"token {KEY_TRANS}",
    }
    try:
        response = requests.post(API_TRANS, json=payload, headers=headers)

        return response.json()["target"]

    except Exception as e:
        return f"{e}"
|
35 |
+
|
36 |
+
|
37 |
+
def translator():
    """Build the Gradio interface for the Caiyun translator tool."""
    return gr.Interface(
        fn=translate,
        inputs=[
            gr.TextArea(label="Input text area", placeholder="Type the text here..."),
            gr.Dropdown(choices=["auto2en", "auto2zh", "auto2ja"], label="Mode"),
        ],
        outputs=gr.TextArea(label="Translation results", show_copy_button=True),
        flagging_mode="never",
        examples=[
            ["彩云小译は最高の翻訳サービスです", "auto2en"],
            ["Lingocloud is the best translation service.", "auto2zh"],
        ],
        cache_examples=False,
    )
|
url.py
ADDED
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import re
|
2 |
+
import json
|
3 |
+
import requests
|
4 |
+
import gradio as gr
|
5 |
+
from config import HEADER
|
6 |
+
|
7 |
+
|
8 |
+
def is_valid_url(url):
    """Heuristic check that *url* looks like an http(s) URL or bare domain."""
    url_re = re.compile(
        r"^(https?://)?"  # optional scheme
        r"([a-zA-Z0-9-]+\.)+[a-zA-Z]{2,}"  # dotted host with alphabetic TLD
        r"(:\d+)?"  # optional port
        r"(/[^ ]*)?$"  # optional path (no spaces)
    )
    return url_re.match(url) is not None
|
13 |
+
|
14 |
+
|
15 |
+
def noxlink(longUrl: str):
    """Shorten via noxlink.net; returns the short URL, the raw response body
    on an unexpected payload, or the exception text on failure."""
    domain = "https://noxlink.net"
    api = f"{domain}/zh-CN/shorten"
    try:
        resp = requests.post(api, json={"longUrl": longUrl}, headers=HEADER)
        if resp.status_code == 200:
            body = json.loads(resp.text)
            if body["success"]:
                return f"{domain}/" + body["message"]

        return resp.text

    except Exception as e:
        return f"{e}"
|
30 |
+
|
31 |
+
|
32 |
+
def monojson(longUrl: str):
    """Shorten via monojson.com; returns the short URL, the raw response body
    on a non-200 status, or the exception text on failure."""
    endpoint = "https://monojson.com/api/short-link"
    try:
        resp = requests.post(endpoint, json={"url": longUrl}, headers=HEADER)
        if resp.status_code != 200:
            return resp.text

        return json.loads(resp.text)["shortUrl"]

    except Exception as e:
        return f"{e}"
|
45 |
+
|
46 |
+
|
47 |
+
def infer(longUrl: str, tool: str):
    """Dispatch to the chosen shortener; wrap valid results in an HTML anchor."""
    providers = {"monojson": monojson, "noxlink": noxlink}
    if tool in providers:
        shortUrl = providers[tool](longUrl)
    else:
        shortUrl = "Please select an API provider!"

    # Only render a clickable link when the provider returned a real URL;
    # error strings are passed through verbatim.
    if is_valid_url(shortUrl):
        return f'<a href="{shortUrl}" target="_blank">{shortUrl}</a>'

    return shortUrl
|
60 |
+
|
61 |
+
|
62 |
+
def url_shortner():
    """Build the Gradio interface for the URL shortener tool.

    (Function name keeps the existing "shortner" spelling — app.py imports it.)
    """
    return gr.Interface(
        fn=infer,
        inputs=[
            gr.Textbox(label="Input a long URL", placeholder="Input a long URL"),
            gr.Dropdown(
                choices=["noxlink", "monojson"],
                label="Select an API provider",
                value="noxlink",
            ),
        ],
        outputs=[
            gr.HTML(
                container=True,
                show_label=True,
                label="Output short URL",
            )
        ],
        description="Convert long urls into short, easy-to-share links",
        flagging_mode="never",
        examples=[
            ["https://www.bing.com", "noxlink"],
            ["https://www.baidu.com", "monojson"],
        ],
        cache_examples=False,
    )
|