exact-railcar committed on
Commit
029ea00
·
verified ·
1 Parent(s): ff1487b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -5
app.py CHANGED
@@ -1,6 +1,6 @@
1
  import gradio
2
 
3
- import basicsr, realesrgan, gfpgan, av, pathlib, diffusers, torch, transformers, builtins, numpy, re
4
  from animatediff.generate import controlnet_preprocess, img2img_preprocess, wild_card_conversion, region_preprocess, unload_controlnet_models
5
  from animatediff.settings import get_model_config, get_infer_config
6
  from animatediff.utils.pipeline import send_to_device
@@ -128,16 +128,13 @@ music = pipeline(prompt='Light rhythm techno', negative_prompt='low quality, ave
128
  del pipeline
129
  torch.cuda.empty_cache()
130
 
131
- model = basicsr.archs.rrdbnet_arch.RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
132
- upsampler = realesrgan.RealESRGANer(scale=4, model_path='https://huggingface.co/chaowenguoback/pal/resolve/main/RealESRGAN_x4plus.pth', model=model, half=True, device='cuda')
133
- face_enhancer = gfpgan.GFPGANer(model_path='https://huggingface.co/chaowenguoback/pal/resolve/main/GFPGANv1.4.pth',upscale=4, bg_upsampler=upsampler)
134
  with av.open('video.mp4', mode='w') as writer:
135
  video = writer.add_stream('h264', rate=8)
136
  video.width = width * 4
137
  video.height = height * 4
138
  video.pix_fmt = 'yuv420p'
139
  audio = writer.add_stream('aac', rate=16000)
140
- for frame in frames: writer.mux(video.encode(av.VideoFrame.from_ndarray(face_enhancer.enhance(frame)[-1])))
141
  writer.mux(video.encode())
142
  for _ in builtins.range(0, music.shape[0], audio.frame_size):
143
  frame = av.AudioFrame.from_ndarray(music[_:_ + audio.frame_size][None], format='fltp', layout='mono')
 
1
  import gradio
2
 
3
+ import av, pathlib, diffusers, torch, transformers, builtins, numpy, re
4
  from animatediff.generate import controlnet_preprocess, img2img_preprocess, wild_card_conversion, region_preprocess, unload_controlnet_models
5
  from animatediff.settings import get_model_config, get_infer_config
6
  from animatediff.utils.pipeline import send_to_device
 
128
  del pipeline
129
  torch.cuda.empty_cache()
130
 
 
 
 
131
  with av.open('video.mp4', mode='w') as writer:
132
  video = writer.add_stream('h264', rate=8)
133
  video.width = width * 4
134
  video.height = height * 4
135
  video.pix_fmt = 'yuv420p'
136
  audio = writer.add_stream('aac', rate=16000)
137
+ for frame in frames: writer.mux(video.encode(av.VideoFrame.from_ndarray(frame)))
138
  writer.mux(video.encode())
139
  for _ in builtins.range(0, music.shape[0], audio.frame_size):
140
  frame = av.AudioFrame.from_ndarray(music[_:_ + audio.frame_size][None], format='fltp', layout='mono')