Editing with this app just isn't working for me!!
Maybe writing my own is the way to go after all?
It's still a work in progress, but here it is.
Features
Audio
I'm sleepy, so I'm off to bed.
import cv2
import sys, os, time
import pydub
import simpleaudio


class mySound:
    def __init__(self, fn):
        # load the audio track straight out of the mp4 via pydub (ffmpeg backend)
        self.sound0 = pydub.AudioSegment.from_file(fn + ".mp4")
        self.fn = fn

    def play(self, start, end, speed=1):
        # slice the segment (pydub slices in milliseconds) and hand the raw PCM to simpleaudio
        self.sound = self.sound0[start*1000: end*1000]
        playObj = simpleaudio.play_buffer(
            self.sound.raw_data,
            num_channels=self.sound.channels,
            bytes_per_sample=self.sound.sample_width,
            # scaling the sample rate changes the playback speed
            sample_rate=int(self.sound.frame_rate*speed)
        )
        return playObj

    def export(self, start, end):
        # write the selected range (start/end in seconds) out as an mp3
        self.sound = self.sound0[start*1000: end*1000]
        self.sound.export(self.fn + ".mp3", format="mp3", bitrate="254k")

    def replay(self, tim):
        # play from tim (seconds) to the end of the track
        self.sound = self.sound0[tim*1000:]
        playObj = simpleaudio.play_buffer(
            self.sound.raw_data,
            num_channels=self.sound.channels,
            bytes_per_sample=self.sound.sample_width,
            sample_rate=int(self.sound.frame_rate)
        )
        return playObj
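
# A minimal usage sketch of mySound on its own (assumes some "sample.mp4" with an
# audio track exists next to this script; not part of the editor flow below):
#
#   snd = mySound("sample")   # loads the audio track of sample.mp4 via pydub
#   obj = snd.play(0, 5)      # plays seconds 0-5, returns a simpleaudio PlayObject
#   obj.wait_done()           # block until the clip finishes
#   snd.export(0, 5)          # writes the same 5-second slice to sample.mp3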

def dispInfo(img):
    # overlay elapsed time, current frame and marked start/end frames on the image
    dougaTm = frame_num * (1/fps)
    mm = dougaTm // 60
    ss = dougaTm % 60
    cv2.putText(img, "%02d:%02.02f" % (mm, ss), (30, 30), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 0, 0), 2, 8)
    cv2.putText(img, "f:%02d" % (frame_num), (30, 60), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 0, 0), 2, 8)
    cv2.putText(img, "sf:%02d" % (startFrame), (30, 90), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2, 8)
    cv2.putText(img, "ef:%02d" % (endFrame), (30, 120), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 0, 255), 2, 8)

def dispWin():
    global playObj, snd, startSec, startTm
    global img, frame_num
    # seek to frame_num, grab that frame and redraw the window
    cap.set(cv2.CAP_PROP_POS_FRAMES, frame_num)
    ret, img = cap.read()
    frame_num = cap.get(cv2.CAP_PROP_POS_FRAMES)
    print("frame_num", frame_num)
    # draw the overlay information
    dispInfo(img)
    cv2.imshow('Video', img)


# editing state: the marked range and the playback / recording flags
startFrame = 0
endFrame = 0
playSW = False
recSW = False

def procPlay():
    global playObj, snd, startSec, startTm
    global img, frame_num
    global startFrame, endFrame
    global playSW, recSW, stopSW
    key = ""
    # playback of the marked range has reached the end frame:
    # stop the audio and wait for a key to adjust the marks or resume
    if endFrame <= frame_num:
        playObj.stop()
        stopSW = True
        playSW = False
        recSW = False
        while True:
            key = cv2.waitKey(0)
            print("key", key)
            if key & 0xFF == 49:    # "1": move the start mark back 5 frames
                startFrame -= 5
                print("startFrame=", startFrame)
                dispWin()
            if key & 0xFF == 50:    # "2": move the start mark forward 5 frames
                startFrame += 5
                print("startFrame=", startFrame)
                dispWin()
            if key & 0xFF == 51:    # "3": move the end mark back 5 frames
                endFrame -= 5
                print("endFrame=", endFrame)
                dispWin()
            if key & 0xFF == 52:    # "4": move the end mark forward 5 frames
                endFrame += 5
                print("endFrame=", endFrame)
                dispWin()
            if key & 0xFF == 27:    # Esc: resume playback from the current frame
                tim = (1/fps) * frame_num
                startSec = tim
                startTm = time.time()
                playObj = snd.replay(tim)
                return key
            if key & 0xFF == ord("p") or key & 0xFF == ord("w"):
                # "p" previews the marked range again, "w" previews it while recording
                if key & 0xFF == ord("w"):
                    recSW = True
                else:
                    playSW = True
                frame_num = startFrame
                dispWin()
                tim = (1/fps) * frame_num
                startSec = tim
                startTm = time.time()
                playObj = snd.replay(tim)
                return key
    return key

def inputKey(key):
    global playObj, snd, startSec, startTm
    global img, frame_num
    global startFrame, endFrame
    global playSW, recSW, stopSW
    stopSW = False
    while True:
        # space: toggle between pause and resume
        if key & 0xFF == ord(" "):
            if stopSW:
                tim = (1/fps) * frame_num
                startSec = tim
                startTm = time.time()
                playObj = snd.replay(tim)
                return key
            else:
                playObj.stop()
                stopSW = True
        # set the start / end frame marks
        if key & 0xFF == ord("r"):
            startFrame = frame_num
            print("startFrame=", startFrame)
            return key
        if key & 0xFF == ord("s"):
            endFrame = frame_num
            print("endFrame=", endFrame)
            return key
        # preview the marked range from the start mark
        if key & 0xFF == ord("p") or key & 0xFF == ord("w"):
            playSW = True
            playObj.stop()
            stopSW = True
            frame_num = startFrame
            dispWin()
            tim = (1/fps) * frame_num
            startSec = tim
            startTm = time.time()
            playObj = snd.replay(tim)
            return key
        # b / f: jump back / forward roughly one second
        if key & 0xFF == ord("b"):
            playObj.stop()
            stopSW = True
            frame_num -= fps+1
            dispWin()
        if key & 0xFF == ord("f"):
            playObj.stop()
            stopSW = True
            frame_num += fps-1
            dispWin()
        # Backspace: step back two frames
        if key & 0xFF == 8:
            playObj.stop()
            stopSW = True
            frame_num -= 2
            dispWin()
        # Enter: stop and redraw the current frame
        if key & 0xFF == 13:
            playObj.stop()
            stopSW = True
            dispWin()
        # wait for the next key press
        key = cv2.waitKey(0)
        print("key===", key)

def wait(speed=1):
    global playObj, snd, startSec, startTm
    global img, frame_num
    while True:
        key = cv2.waitKey(1)
        if key & 0xFF == ord("q"):
            return key
        # keys that are handed off to inputKey()
        sousaKey = [ord(" "), ord("b"), ord("f"),
                    ord("r"),
                    ord("s"),
                    ord("p"),
                    ord("w"),
                    8, 13]
        if key in sousaKey:
            key = inputKey(key)
            return key
        if playSW or recSW:
            procPlay()
        # pace the video to the audio: keep showing this frame until the wall
        # clock catches up with the frame's timestamp
        realTm = time.time() - startTm
        tim = (1/fps) * frame_num / speed
        if startSec + realTm > tim:
            return key
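
# Key bindings handled by wait() / inputKey() / procPlay() above (summarised from the code):
#   q          quit the main loop and export the marked range as mp3
#   space      pause / resume audio playback
#   r / s      mark the start / end frame at the current position
#   p / w      replay from the start mark ("w" at the end-of-range pause also enables frame recording)
#   b / f      jump back / forward roughly one second
#   1 / 2      nudge the start mark -5 / +5 frames (at the end-of-range pause)
#   3 / 4      nudge the end mark -5 / +5 frames (at the end-of-range pause)
#   Backspace  step back two frames
#   Enter      stop and redraw the current frame
#   Esc        resume playback from the end-of-range pause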

# ----------------------------------------------------------------
# Main script starts here
# ----------------------------------------------------------------
fn = "man"
fn = "testCap"          # base name of the input mp4 (the later assignment wins)
startSec = 0
endSec = 200

# video source
cap = cv2.VideoCapture(fn + '.mp4')   # put the path to your own mp4 here
frame_num = cap.get(cv2.CAP_PROP_POS_FRAMES)
fps = cap.get(cv2.CAP_PROP_FPS)

# output file settings
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
ret, img = cap.read()
h, w, channels = img.shape
rec = cv2.VideoWriter('douga.mp4', fourcc, fps, (w, h))

snd = mySound(fn)
playObj = snd.play(startSec, endSec)
startTm = time.time()

# seconds to frames: at 25 fps, 0 s is frame 0, 1 s is frame 25, 2 s is frame 50
pos = fps * startSec
cap.set(cv2.CAP_PROP_POS_FRAMES, pos)

# ----------------------------------------------------
# Main loop
# ----------------------------------------------------
while True:
    ret, img = cap.read()
    frame_num = cap.get(cv2.CAP_PROP_POS_FRAMES)
    # stop once the end time is reached
    tm = frame_num / fps
    if tm >= endSec:
        break
    # draw the overlay information
    dispInfo(img)
    cv2.imshow('Video', img)
    if recSW:
        img_out = img.copy()
        rec.write(img_out)
        print("rec!")
    key = wait()
    if key & 0xFF == ord("q"):
        # export the marked range as mp3 (frame numbers converted to seconds)
        snd.export(startFrame/fps, endFrame/fps)
        break
    # move one frame back
    if key & 0xFF == ord("b"):
        frame_num -= 1
        cap.set(cv2.CAP_PROP_POS_FRAMES, frame_num)
        dispInfo(img)

rec.release()
# the script exits here, so the muxing step below only runs if this exit is removed
sys.exit()

# ------------------------------------------------
# Mux the recorded video together with the audio
# ------------------------------------------------
import ffmpeg


def concat():
    fn_in = "douga"          # video recorded by the editor above
    fn_in2 = "man"           # audio file; must match the mp3 exported above
    fn_out = "douga_out.mp4"
    # remove a stale output file so ffmpeg does not refuse to overwrite it
    if os.path.isfile(fn_out):
        os.remove(fn_out)
    instream1 = ffmpeg.input(fn_in + ".mp4")    # video
    instream2 = ffmpeg.input(fn_in2 + ".mp3")   # audio
    # codecs and output file: copy both streams without re-encoding
    stream = ffmpeg.output(instream1, instream2, fn_out, vcodec="copy", acodec="copy")
    ffmpeg.run(stream, quiet=True)
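
# For reference, the ffmpeg-python graph above is roughly equivalent to this CLI
# invocation (assuming the standard ffmpeg binary is on PATH):
#   ffmpeg -i douga.mp4 -i man.mp3 -c:v copy -c:a copy douga_out.mp4
# i.e. both streams are stream-copied into the new container without re-encoding.
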
concat()
sys.exit()

# Leftover preview loop from an earlier test; unreachable after the sys.exit() above.
while True:
    ret, img = cap.read()
    img = cv2.resize(img, (w, h))
    cv2.imshow('Video', img)
    # press q to stop
    if cv2.waitKey(50) & 0xFF == ord('q'):
        break