John6666 committed
Commit bfde28e · verified · 1 Parent(s): 5ab86cf

Upload app.py

Files changed (1)
  1. app.py  +175 -175
app.py CHANGED
@@ -1,176 +1,176 @@
+ import gradio as gr
+ from random import randint
+ from all_models import models
+
+ from externalmod import gr_Interface_load
+
+ import asyncio
+ import os
+ from threading import RLock
+ lock = RLock()
+ HF_TOKEN = os.environ.get("HF_TOKEN") if os.environ.get("HF_TOKEN") else None # If private or gated models aren't used, ENV setting is unnecessary.
+
+
+ def load_fn(models):
+     global models_load
+     models_load = {}
+
+     for model in models:
+         if model not in models_load.keys():
+             try:
+                 m = gr_Interface_load(f'models/{model}', hf_token=HF_TOKEN)
+             except Exception as error:
+                 print(error)
+                 m = gr.Interface(lambda: None, ['text'], ['image'])
+             models_load.update({model: m})
+
+
+ load_fn(models)
+
+
+ num_models = 6
+ MAX_SEED = 3999999999
+ default_models = models[:num_models]
+ inference_timeout = 600
+ starting_seed = randint(1941, 2024)
+
+ def extend_choices(choices):
+     return choices[:num_models] + (num_models - len(choices[:num_models])) * ['NA']
+
+
+ def update_imgbox(choices):
+     choices_plus = extend_choices(choices[:num_models])
+     return [gr.Image(None, label=m, visible=(m!='NA')) for m in choices_plus]
+
+ def gen_fn(model_str, prompt):
+     if model_str == 'NA':
+         return None
+     noise = str('') #str(randint(0, 99999999999))
+     return models_load[model_str](f'{prompt} {noise}')
+
+ async def infer(model_str, prompt, seed=1, timeout=inference_timeout):
+     from pathlib import Path
+     kwargs = {}
+     noise = ""
+     kwargs["seed"] = seed
+     task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn,
+                                                  prompt=f'{prompt} {noise}', **kwargs))
+     await asyncio.sleep(0)
+     try:
+         result = await asyncio.wait_for(task, timeout=timeout)
+     except (Exception, asyncio.TimeoutError) as e:
+         print(e)
+         print(f"Task timed out: {model_str}")
+         if not task.done(): task.cancel()
+         result = None
+     if task.done() and result is not None:
+         with lock:
+             png_path = "image.png"
+             result.save(png_path)
+             image = str(Path(png_path).resolve())
+         return image
+     return None
+
+ def gen_fnseed(model_str, prompt, seed=1):
+     if model_str == 'NA':
+         return None
+     try:
+         loop = asyncio.new_event_loop()
+         result = loop.run_until_complete(infer(model_str, prompt, seed, inference_timeout))
+     except (Exception, asyncio.CancelledError) as e:
+         print(e)
+         print(f"Task aborted: {model_str}")
+         result = None
+         with lock:
+             image = "https://huggingface.co/spaces/Yntec/ToyWorld/resolve/main/error.png"
+             result = image
+     finally:
+         loop.close()
+     return result
+
+ css="""
+ .wrapper img {font-size: 98% !important; white-space: nowrap !important; text-align: center !important;
+ display: inline-block !important;}
+ """
+
+ with gr.Blocks(css=css) as demo:
+     with gr.Tab('Mini Toy World'):
+         txt_input = gr.Textbox(label='Your prompt:', lines=4)
+         gen_button = gr.Button('Generate up to 6 images in up to 3 minutes total')
+         #stop_button = gr.Button('Stop', variant = 'secondary', interactive = False)
+         gen_button.click(lambda s: gr.update(interactive = True), None)
+         gr.HTML(
+ """
+ <div style="text-align: center; max-width: 1200px; margin: 0 auto;">
+ <div>
+ <body>
+ <div class="center"><p style="margin-bottom: 10px; color: #000000;">Scroll down to see more images and select models.</p>
+ </div>
+ </body>
+ </div>
+ </div>
+ """
+         )
+         with gr.Row():
+             output = [gr.Image(label = m, min_width=480) for m in default_models]
+             current_models = [gr.Textbox(m, visible = False) for m in default_models]
+
+         for m, o in zip(current_models, output):
+             gen_event = gr.on(triggers=[gen_button.click, txt_input.submit], fn=gen_fn,
+                               inputs=[m, txt_input], outputs=[o], concurrency_limit=None, queue=False)
+             #stop_button.click(lambda s: gr.update(interactive = False), None, stop_button, cancels = [gen_event])
+         with gr.Accordion('Model selection'):
+             model_choice = gr.CheckboxGroup(models, label = f'Choose up to {int(num_models)} different models from the {len(models)} available!', value=default_models, interactive=True)
+             #model_choice = gr.CheckboxGroup(models, label = f'Choose up to {num_models} different models from the 2 available! Untick them to only use one!', value = default_models, multiselect = True, max_choices = num_models, interactive = True, filterable = False)
+             model_choice.change(update_imgbox, model_choice, output)
+             model_choice.change(extend_choices, model_choice, current_models)
+         with gr.Row():
+             gr.HTML(
+ """
+ <div class="footer">
+ <p> Based on the <a href="https://huggingface.co/spaces/John6666/hfd_test_nostopbutton">Huggingface NoStopButton</a> Space by John6666, <a href="https://huggingface.co/spaces/derwahnsinn/TestGen">TestGen</a> Space by derwahnsinn, the <a href="https://huggingface.co/spaces/RdnUser77/SpacIO_v1">SpacIO</a> Space by RdnUser77 and Omnibus's Maximum Multiplier! For 6 images with the same model check out the <a href="https://huggingface.co/spaces/Yntec/PrintingPress">Printing Press</a>, for the classic UI with prompt enhancer try <a href="https://huggingface.co/spaces/Yntec/blitz_diffusion">Blitz Diffusion!</a>
+ </p>
+ """
+             )
+     with gr.Tab('🌱 Use seeds!'):
+         txt_inputseed = gr.Textbox(label='Your prompt:', lines=4)
+         gen_buttonseed = gr.Button('Generate up to 6 images with the same seed in up to 3 minutes total')
+         seed = gr.Slider(label="Use a seed to replicate the same image later (maximum 3999999999)", minimum=0, maximum=MAX_SEED, step=1, value=starting_seed, scale=3)
+         #stop_button = gr.Button('Stop', variant = 'secondary', interactive = False)
+         gen_buttonseed.click(lambda s: gr.update(interactive = True), None)
+         gr.HTML(
+ """
+ <div style="text-align: center; max-width: 1200px; margin: 0 auto;">
+ <div>
+ <body>
+ <div class="center"><p style="margin-bottom: 10px; color: #000000;">Scroll down to see more images and select models.</p>
+ </div>
+ </body>
+ </div>
+ </div>
+ """
+         )
+         with gr.Row():
+             output = [gr.Image(label = m, min_width=480) for m in default_models]
+             current_models = [gr.Textbox(m, visible = False) for m in default_models]
+
+         for m, o in zip(current_models, output):
+             gen_eventseed = gr.on(triggers=[gen_buttonseed.click, txt_inputseed.submit], fn=gen_fnseed,
+                                   inputs=[m, txt_inputseed, seed], outputs=[o], concurrency_limit=None, queue=False)
+             #stop_button.click(lambda s: gr.update(interactive = False), None, stop_button, cancels = [gen_event])
+         with gr.Accordion('Model selection'):
+             model_choice = gr.CheckboxGroup(models, label = f'Choose up to {int(num_models)} different models from the {len(models)} available!', value=default_models, interactive=True)
+             #model_choice = gr.CheckboxGroup(models, label = f'Choose up to {num_models} different models from the 2 available! Untick them to only use one!', value = default_models, multiselect = True, max_choices = num_models, interactive = True, filterable = False)
+             model_choice.change(update_imgbox, model_choice, output)
+             model_choice.change(extend_choices, model_choice, current_models)
+         with gr.Row():
+             gr.HTML(
+ """
+ <div class="footer">
+ <p> Based on the <a href="https://huggingface.co/spaces/John6666/hfd_test_nostopbutton">Huggingface NoStopButton</a> Space by John6666, <a href="https://huggingface.co/spaces/derwahnsinn/TestGen">TestGen</a> Space by derwahnsinn, the <a href="https://huggingface.co/spaces/RdnUser77/SpacIO_v1">SpacIO</a> Space by RdnUser77 and Omnibus's Maximum Multiplier! For 6 images with the same model check out the <a href="https://huggingface.co/spaces/Yntec/PrintingPress">Printing Press</a>, for the classic UI with prompt enhancer try <a href="https://huggingface.co/spaces/Yntec/blitz_diffusion">Blitz Diffusion!</a>
+ </p>
+ """
+             )
+
+ demo.queue(default_concurrency_limit=200, max_size=200)
  demo.launch(show_api=False, max_threads=400)