Overlaying an image on a video reduces the video's resolution
When I overlay an image on my video, the video quality is greatly reduced. If I don't set the video composition on the export session, or if I set the export preset to passthrough, the video quality is great (but then, obviously, I get no overlays).
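(For reference, this is roughly what the passthrough export that keeps full quality looks like; a minimal sketch, with inputURL/outputURL as placeholder names rather than my real code:)

// Sketch: passthrough export, no videoComposition attached.
// The frames are copied as-is, so quality is untouched - but no overlay either.
let asset = AVAsset(url: inputURL) // placeholder URL
guard let exporter = AVAssetExportSession(asset: asset,
                                          presetName: AVAssetExportPresetPassthrough) else { return }
exporter.outputURL = outputURL // placeholder URL
exporter.outputFileType = AVFileType.mov
exporter.exportAsynchronously {
    print("Export finished with status: \(exporter.status.rawValue)")
}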
I'm passing in a local .mov video URL to add the overlays to. I'm using PHPhotoLibrary to save the video to the camera roll, and some other helper functions to transform the video and set its instructions.
It all seems pretty straightforward, but something is killing the video quality:
func merge3(url: URL) {
    let firstAsset = AVAsset(url: url)

    // 1 - Create AVMutableComposition object. This object will hold your AVMutableCompositionTrack instances.
    let mixComposition = AVMutableComposition()

    // 2 - Create the video track and copy the source video into it
    guard let firstTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video,
                                                          preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else {
        return
    }
    do {
        try firstTrack.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: firstAsset.duration),
                                       of: firstAsset.tracks(withMediaType: AVMediaType.video)[0],
                                       at: CMTime.zero)
    } catch {
        print("Failed to load first track")
        return
    }

    // 3 - Build the layer tree: a video layer plus the logo overlay
    let s = UIScreen.main.bounds
    // scaleImageToSize is a custom UIImage extension
    let imglogo = UIImage(named: "django")?.scaleImageToSize(newSize: CGSize(width: 250, height: 125))
    let imglayer = CALayer()
    imglayer.contents = imglogo?.cgImage
    imglayer.frame = CGRect(x: s.width / 2 - 125, y: s.height / 2 - 67.5, width: 250, height: 125)
    imglayer.opacity = 1.0

    let videolayer = CALayer()
    videolayer.frame = CGRect(x: 0, y: 0, width: s.width, height: s.height)

    let parentlayer = CALayer()
    parentlayer.frame = CGRect(x: 0, y: 0, width: s.width, height: s.height)
    parentlayer.addSublayer(videolayer)
    parentlayer.addSublayer(imglayer)

    // 2.1 (Note: this instruction is created but never attached to anything.)
    let mainInstruction = AVMutableVideoCompositionInstruction()
    mainInstruction.timeRange = CMTimeRangeMake(start: CMTime.zero,
                                                duration: firstAsset.duration)

    let layercomposition = AVMutableVideoComposition()
    layercomposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
    layercomposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videolayer, in: parentlayer)
    layercomposition.renderSize = CGSize(width: UIScreen.main.bounds.width, height: UIScreen.main.bounds.height)

    // instruction for watermark
    let instruction = AVMutableVideoCompositionInstruction()
    instruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: firstAsset.duration)
    _ = mixComposition.tracks(withMediaType: AVMediaType.video)[0] as AVAssetTrack
    let layerinstruction = VideoHelper.videoCompositionInstruction1(firstTrack, asset: firstAsset)
    instruction.layerInstructions = [layerinstruction]
    layercomposition.instructions = [instruction]

    // 4 - Get path
    guard let documentDirectory = FileManager.default.urls(for: .documentDirectory,
                                                           in: .userDomainMask).first else {
        return
    }
    let dateFormatter = DateFormatter()
    dateFormatter.dateStyle = .long
    dateFormatter.timeStyle = .short
    let date = dateFormatter.string(from: Date())
    let url = documentDirectory.appendingPathComponent("mergeVideo-\(date).mov")

    // 5 - Create Exporter
    guard let exporter = AVAssetExportSession(asset: mixComposition,
                                              presetName: AVAssetExportPresetHighestQuality) else {
        return
    }
    exporter.outputURL = url
    exporter.outputFileType = AVFileType.mov
    exporter.shouldOptimizeForNetworkUse = true
    exporter.videoComposition = layercomposition

    // 6 - Perform the Export
    exporter.exportAsynchronously {
        DispatchQueue.main.async {
            self.exportDidFinish(exporter)
        }
    }
}
func exportDidFinish(_ session: AVAssetExportSession) {
    guard
        session.status == AVAssetExportSession.Status.completed,
        let outputURL = session.outputURL
    else {
        return
    }
    let saveVideoToPhotos = {
        PHPhotoLibrary.shared().performChanges({
            PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: outputURL)
        }) { saved, error in
            let success = saved && (error == nil)
            let title = success ? "Success" : "Error"
            let message = success ? "Video saved" : "Failed to save video"
            DispatchQueue.main.async { // performChanges completes off the main queue; present UI on main
                let alert = UIAlertController(title: title, message: message, preferredStyle: .alert)
                alert.addAction(UIAlertAction(title: "OK", style: .cancel, handler: nil))
                self.present(alert, animated: true, completion: nil)
            }
        }
    }
    // Ensure permission to access the Photo Library
    if PHPhotoLibrary.authorizationStatus() != .authorized {
        PHPhotoLibrary.requestAuthorization { status in
            if status == .authorized {
                saveVideoToPhotos()
            }
        }
    } else {
        saveVideoToPhotos()
    }
}
static func videoCompositionInstruction1(_ track: AVCompositionTrack, asset: AVAsset)
    -> AVMutableVideoCompositionLayerInstruction {
    let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
    let assetTrack = asset.tracks(withMediaType: .video)[0]
    let transform = assetTrack.preferredTransform
    let assetInfo = orientationFromTransform(transform)
    var scaleToFitRatio = UIScreen.main.bounds.width / assetTrack.naturalSize.width
    if assetInfo.isPortrait { // not hit
        scaleToFitRatio = UIScreen.main.bounds.width / assetTrack.naturalSize.height
        let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
        instruction.setTransform(assetTrack.preferredTransform.concatenating(scaleFactor), at: CMTime.zero)
    } else { // hit
        let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
        var concat = assetTrack.preferredTransform.concatenating(scaleFactor)
            .concatenating(CGAffineTransform(translationX: 0, y: UIScreen.main.bounds.width / 4))
        if assetInfo.orientation == .down { // not hit
            let fixUpsideDown = CGAffineTransform(rotationAngle: CGFloat(Double.pi))
            let windowBounds = UIScreen.main.bounds
            let yFix = assetTrack.naturalSize.height + windowBounds.height
            let centerFix = CGAffineTransform(translationX: assetTrack.naturalSize.width, y: yFix)
            concat = fixUpsideDown.concatenating(centerFix).concatenating(scaleFactor)
        }
        instruction.setTransform(concat, at: CMTime.zero)
    }
    return instruction
}
static func orientationFromTransform(_ transform: CGAffineTransform)
    -> (orientation: UIImage.Orientation, isPortrait: Bool) {
    var assetOrientation = UIImage.Orientation.up
    var isPortrait = false
    if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
        assetOrientation = .right
        isPortrait = true
    } else if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
        assetOrientation = .left
        isPortrait = true
    } else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == 1.0 {
        assetOrientation = .up
    } else if transform.a == -1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
        assetOrientation = .down
    }
    return (assetOrientation, isPortrait)
}
You are setting:
layercomposition.renderSize = CGSize(width: UIScreen.main.bounds.width, height: UIScreen.main.bounds.height)
It should be:
layercomposition.renderSize = yourAsset.tracks(withMediaType: AVMediaType.video)[0].naturalSize
What the first one does is set your resolution to your screen size, not the actual size of the original video. The second one corrects this by setting the resolution to that of the original video.
Think of it this way - you don't want your resolution to be the size of your screen; that would be really small. You want the size of the original video, or the size of some commonly used video setting.
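One caveat worth knowing: naturalSize is reported before preferredTransform is applied, so a portrait clip comes back as, say, 1920x1080 rather than 1080x1920. A minimal sketch that accounts for this, assuming the firstAsset and layercomposition from the question:

// Sketch: derive renderSize from the source track, applying preferredTransform
// so portrait clips get their display dimensions rather than the encoded ones.
let videoTrack = firstAsset.tracks(withMediaType: AVMediaType.video)[0]
let size = videoTrack.naturalSize.applying(videoTrack.preferredTransform)
layercomposition.renderSize = CGSize(width: abs(size.width), height: abs(size.height))

The videolayer and parentlayer frames should match this renderSize as well; sizing them to the screen while rendering at the video's resolution will scale the overlay unexpectedly.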