zRzRzRzRzRzRzR committed
Commit a8cf837 • 1 Parent(s): 3cd8cde

example images

app.py CHANGED
@@ -65,10 +65,10 @@ pipe_image = CogVideoXImageToVideoPipeline.from_pretrained(
 ).to(device)
 
 
-pipe.transformer.to(memory_format=torch.channels_last)
-pipe.transformer = torch.compile(pipe.transformer, mode="max-autotune", fullgraph=True)
-pipe_image.transformer.to(memory_format=torch.channels_last)
-pipe_image.transformer = torch.compile(pipe_image.transformer, mode="max-autotune", fullgraph=True)
+# pipe.transformer.to(memory_format=torch.channels_last)
+# pipe.transformer = torch.compile(pipe.transformer, mode="max-autotune", fullgraph=True)
+# pipe_image.transformer.to(memory_format=torch.channels_last)
+# pipe_image.transformer = torch.compile(pipe_image.transformer, mode="max-autotune", fullgraph=True)
 
 os.makedirs("./output", exist_ok=True)
 os.makedirs("./gradio_tmp", exist_ok=True)
@@ -294,7 +294,8 @@ def delete_old_files():
 
 
 threading.Thread(target=delete_old_files, daemon=True).start()
-examples = [["horse.mp4"], ["kitten.mp4"], ["train_running.mp4"]]
+examples_videos = [["example_videos/horse.mp4"], ["example_videos/kitten.mp4"], ["example_videos/train_running.mp4"]]
+examples_images = [["example_images/beach.png"], ["example_images/street.png"], ["example_images/camping.png"]]
 
 with gr.Blocks() as demo:
     gr.Markdown("""
@@ -320,10 +321,11 @@ with gr.Blocks() as demo:
         with gr.Column():
             with gr.Accordion("I2V: Image Input (cannot be used simultaneously with video input)", open=False):
                 image_input = gr.Image(label="Input Image (will be cropped to 720 * 480)")
+                examples_component_images = gr.Examples(examples_images, inputs=[image_input], cache_examples=False)
             with gr.Accordion("V2V: Video Input (cannot be used simultaneously with image input)", open=False):
                 video_input = gr.Video(label="Input Video (will be cropped to 49 frames, 6 seconds at 8fps)")
                 strength = gr.Slider(0.1, 1.0, value=0.8, step=0.01, label="Strength")
-                examples_component = gr.Examples(examples, inputs=[video_input], cache_examples=False)
+                examples_component_videos = gr.Examples(examples_videos, inputs=[video_input], cache_examples=False)
             prompt = gr.Textbox(label="Prompt (Less than 200 Words)", placeholder="Enter your prompt here", lines=5)
 
             with gr.Row():
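The new gr.Examples rows wire the bundled example media to the corresponding input components. Below is a minimal standalone sketch of the same pattern, using the file paths added in this commit but a simplified layout; it is an illustration, not the Space's actual app.py:

import gradio as gr

# Example media added in this commit; paths are relative to the Space root.
examples_images = [["example_images/beach.png"], ["example_images/street.png"], ["example_images/camping.png"]]
examples_videos = [["example_videos/horse.mp4"], ["example_videos/kitten.mp4"], ["example_videos/train_running.mp4"]]

with gr.Blocks() as demo:
    image_input = gr.Image(label="Input Image (will be cropped to 720 * 480)")
    # `inputs` must reference the component(s) that the example values populate.
    gr.Examples(examples=examples_images, inputs=[image_input], cache_examples=False)

    video_input = gr.Video(label="Input Video (will be cropped to 49 frames, 6 seconds at 8fps)")
    gr.Examples(examples=examples_videos, inputs=[video_input], cache_examples=False)

if __name__ == "__main__":
    demo.launch()

With cache_examples=False, clicking an example only fills the inputs; no generation runs until the user triggers it.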
example_images/beach.png ADDED
example_images/camping.png ADDED
example_images/street.png ADDED
horse.mp4 → example_videos/horse.mp4 RENAMED
File without changes
kitten.mp4 → example_videos/kitten.mp4 RENAMED
File without changes
train_running.mp4 → example_videos/train_running.mp4 RENAMED
File without changes
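The first hunk in app.py comments out the channels-last conversion and torch.compile calls rather than deleting them. A minimal sketch of keeping that optimization available behind an opt-in switch instead; the COMPILE_TRANSFORMER environment variable is an assumption for illustration, not part of this commit, and pipe / pipe_image are the pipelines defined earlier in app.py:

import os
import torch

# Opt-in: set COMPILE_TRANSFORMER=1 in the Space settings to re-enable compilation.
if os.environ.get("COMPILE_TRANSFORMER") == "1":
    for p in (pipe, pipe_image):
        # channels_last memory format generally helps CUDA kernels for vision models.
        p.transformer.to(memory_format=torch.channels_last)
        # max-autotune searches for the fastest kernels; the first call is slow.
        p.transformer = torch.compile(p.transformer, mode="max-autotune", fullgraph=True)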