Converting Microsoft Project Oxford speech recognition from Objective-C to Swift
Microsoft Project Oxford has a good speech recognition API for iOS, with Objective-C instructions. Following the getting-started guide, I was able to build it easily. However, I am having trouble converting it to Swift.
I first created a Swift project. I created the bridging header (ProjectName-Bridging-Header.h) and inserted the following code into it:
#import "SpeechRecognitionService.h"
I want to convert the Objective-C header and implementation files into ViewController.swift.
Contents of ViewController.h:
#import <UIKit/UIKit.h>
#import "SpeechRecognitionService.h"

@interface ViewController : UIViewController<SpeechRecognitionProtocol>
{
    NSMutableString* textOnScreen;
    DataRecognitionClient* dataClient;
    MicrophoneRecognitionClient* micClient;
    SpeechRecognitionMode recoMode;
    bool isMicrophoneReco;
    bool isIntent;
    int waitSeconds;
}

@property (nonatomic, strong) IBOutlet UIButton* startButton;

/* In our UI, we have a text box to show the reco results.*/
@property (nonatomic, strong) IBOutlet UITextView* quoteText;

/* Action for pressing the "Start" button */
-(IBAction)startButtonTapped:(id)sender;

@end
Contents of ViewController.m:
#import "ViewController.h" #import <AVFoundation/AVAudioSession.h> @interface ViewController (/*private*/) /* Create a recognition request to interact with the Speech Service.*/ -(void)initializeRecoClient; @end NSString* ConvertSpeechRecoConfidenceEnumToString(Confidence confidence); /* The Main App */ @implementation ViewController /* Initialization to be done when app starts. */ -(void)viewDidLoad { [super viewDidLoad]; textOnScreen = [NSMutableString stringWithCapacity: 1000]; recoMode = SpeechRecognitionMode_ShortPhrase; isMicrophoneReco = true; isIntent = false; waitSeconds = recoMode == SpeechRecognitionMode_ShortPhrase ? 20 : 200; [self initializeRecoClient]; } /* Called when a partial response is received. */ -(void)onPartialResponseReceived:(NSString*) response { dispatch_async(dispatch_get_main_queue(), ^{ [textOnScreen appendFormat:(@"%@\n"), response]; self.quoteText.text = response; }); } /* Called when a final response is received. */ -(void)onFinalResponseReceived:(RecognitionResult*)response { bool isFinalDicationMessage = recoMode == SpeechRecognitionMode_LongDictation && (response.RecognitionStatus == RecognitionStatus_EndOfDictation || response.RecognitionStatus == RecognitionStatus_DictationEndSilenceTimeout); if (isMicrophoneReco && ((recoMode == SpeechRecognitionMode_ShortPhrase) || isFinalDicationMessage)) { [micClient endMicAndRecognition]; } if ((recoMode == SpeechRecognitionMode_ShortPhrase) || isFinalDicationMessage) { dispatch_async(dispatch_get_main_queue(), ^{ [[self startButton] setEnabled:YES]; }); } } NSString* ConvertSpeechErrorToString(int errorCode) { switch ((SpeechClientStatus)errorCode) { case SpeechClientStatus_SecurityFailed: return @"SpeechClientStatus_SecurityFailed"; case SpeechClientStatus_LoginFailed: return @"SpeechClientStatus_LoginFailed"; case SpeechClientStatus_Timeout: return @"SpeechClientStatus_Timeout"; case SpeechClientStatus_ConnectionFailed: return @"SpeechClientStatus_ConnectionFailed"; case SpeechClientStatus_NameNotFound: return @"SpeechClientStatus_NameNotFound"; case SpeechClientStatus_InvalidService: return @"SpeechClientStatus_InvalidService"; case SpeechClientStatus_InvalidProxy: return @"SpeechClientStatus_InvalidProxy"; case SpeechClientStatus_BadResponse: return @"SpeechClientStatus_BadResponse"; case SpeechClientStatus_InternalError: return @"SpeechClientStatus_InternalError"; case SpeechClientStatus_AuthenticationError: return @"SpeechClientStatus_AuthenticationError"; case SpeechClientStatus_AuthenticationExpired: return @"SpeechClientStatus_AuthenticationExpired"; case SpeechClientStatus_LimitsExceeded: return @"SpeechClientStatus_LimitsExceeded"; case SpeechClientStatus_AudioOutputFailed: return @"SpeechClientStatus_AudioOutputFailed"; case SpeechClientStatus_MicrophoneInUse: return @"SpeechClientStatus_MicrophoneInUse"; case SpeechClientStatus_MicrophoneUnavailable: return @"SpeechClientStatus_MicrophoneUnavailable"; case SpeechClientStatus_MicrophoneStatusUnknown:return @"SpeechClientStatus_MicrophoneStatusUnknown"; case SpeechClientStatus_InvalidArgument: return @"SpeechClientStatus_InvalidArgument"; } return [[NSString alloc] initWithFormat:@"Unknown error: %d\n", errorCode]; } /* Called when an error is received. 
*/ -(void)onError:(NSString*)errorMessage withErrorCode:(int)errorCode { dispatch_async(dispatch_get_main_queue(), ^{ [[self startButton] setEnabled:YES]; [textOnScreen appendString:(@"********* Error Detected *********\n")]; [textOnScreen appendFormat:(@"%@ %@\n"), errorMessage, ConvertSpeechErrorToString(errorCode)]; self.quoteText.text = textOnScreen; }); } /* Event fired when the microphone recording status has changed. */ -(void)onMicrophoneStatus:(Boolean)recording { if (!recording) { [micClient endMicAndRecognition]; } dispatch_async(dispatch_get_main_queue(), ^{ if (!recording) { [[self startButton] setEnabled:YES]; } self.quoteText.text = textOnScreen; }); } /* Create a recognition request to interact with the Speech Recognition Service.*/ -(void)initializeRecoClient { NSString* language = @"en-us"; NSString* path = [[NSBundle mainBundle] pathForResource:@"settings" ofType:@"plist"]; NSDictionary* settings = [[NSDictionary alloc] initWithContentsOfFile:path]; NSString* primaryOrSecondaryKey = [settings objectForKey:(@"primaryKey")]; NSString* luisAppID = [settings objectForKey:(@"luisAppID")]; NSString* luisSubscriptionID = [settings objectForKey:(@"luisSubscriptionID")]; if (isMicrophoneReco) { if (!isIntent) { micClient = [SpeechRecognitionServiceFactory createMicrophoneClient:(recoMode) withLanguage:(language) withKey:(primaryOrSecondaryKey) withProtocol:(self)]; } else { MicrophoneRecognitionClientWithIntent* micIntentClient; micIntentClient = [SpeechRecognitionServiceFactory createMicrophoneClientWithIntent:(language) withKey:(primaryOrSecondaryKey) withLUISAppID:(luisAppID) withLUISSecret:(luisSubscriptionID) withProtocol:(self)]; micClient = micIntentClient; } } else { if (!isIntent) { dataClient = [SpeechRecognitionServiceFactory createDataClient:(recoMode) withLanguage:(language) withKey:(primaryOrSecondaryKey) withProtocol:(self)]; } else { DataRecognitionClientWithIntent* dataIntentClient; dataIntentClient = [SpeechRecognitionServiceFactory createDataClientWithIntent:(language) withKey:(primaryOrSecondaryKey) withLUISAppID:(luisAppID) withLUISSecret:(luisSubscriptionID) withProtocol:(self)]; dataClient = dataIntentClient; } } } /* Take enum value and produce NSString */ NSString* ConvertSpeechRecoConfidenceEnumToString(Confidence confidence) { switch (confidence) { case SpeechRecoConfidence_None: return @"None"; case SpeechRecoConfidence_Low: return @"Low"; case SpeechRecoConfidence_Normal: return @"Normal"; case SpeechRecoConfidence_High: return @"High"; } } /* Action for pressing the "Start" button */ -(IBAction)startButtonTapped:(id)sender { [textOnScreen setString:(@"")]; self.quoteText.text = textOnScreen; [[self startButton] setEnabled:NO]; if (isMicrophoneReco) { OSStatus status = [micClient startMicAndRecognition]; if (status) { [textOnScreen appendFormat:(@"Error starting audio. %@\n"), ConvertSpeechErrorToString(status)]; } } } /* Action for low memory */ -(void)didReceiveMemoryWarning { [super didReceiveMemoryWarning]; } @end
I am new to iOS programming. I would appreciate any help. Thanks.
-
1. Convert your Objective-C view controller to Swift; do not import it through the bridging header.
2. Use the newly converted class wherever the Objective-C version was used before.
3. Import only the framework header files in the bridging header.
Use Swiftify to convert the Objective-C code to Swift.
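The part Swiftify usually mangles is the factory calls. An Objective-C class-method selector such as createMicrophoneClient:withLanguage:withKey:withProtocol: is imported into Swift with the first selector segment as the method name and the remaining segments as argument labels. A minimal sketch of that mapping, using the names from the question (assuming the Swift 2 importer that was current for the Project Oxford SDK):

    // Objective-C:
    //   micClient = [SpeechRecognitionServiceFactory createMicrophoneClient:(recoMode)
    //                                                          withLanguage:(language)
    //                                                               withKey:(primaryOrSecondaryKey)
    //                                                          withProtocol:(self)];
    //
    // Swift: first selector segment becomes the method name, the rest become argument labels.
    micClient = SpeechRecognitionServiceFactory.createMicrophoneClient(recoMode,
        withLanguage: language,
        withKey: primaryOrSecondaryKey,
        withProtocol: self)

The createMicrophoneClientWithIntent, createDataClient and createDataClientWithIntent calls follow the same pattern.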
EDIT
Here is the converted code, with both files combined:
import UIKit

class ViewController: UIViewController, SpeechRecognitionProtocol {

    // Variable declarations.
    var textOnScreen = NSMutableString(capacity: 1000)
    var dataClient: DataRecognitionClient!
    var micClient: MicrophoneRecognitionClient!
    var recoMode = SpeechRecognitionMode_ShortPhrase
    var isMicrophoneReco = true
    var isIntent = false
    var waitSeconds = 0

    // IBOutlets
    @IBOutlet var startButton: UIButton!
    /* In our UI, we have a text box to show the reco results. */
    @IBOutlet var quoteText: UITextView!

    /* Action for pressing the "Start" button */
    @IBAction func startButtonTapped(sender: AnyObject) {
        textOnScreen.setString("")
        quoteText.text = textOnScreen as String
        startButton.enabled = false

        if isMicrophoneReco {
            let status = micClient.startMicAndRecognition()
            if status != 0 {
                textOnScreen.appendFormat("Error starting audio. %@\n",
                                          convertSpeechErrorToString(Int(status)))
            }
        }
    }

    /* Initialization to be done when app starts. */
    override func viewDidLoad() {
        super.viewDidLoad()

        recoMode = SpeechRecognitionMode_ShortPhrase
        isMicrophoneReco = true
        isIntent = false
        waitSeconds = recoMode == SpeechRecognitionMode_ShortPhrase ? 20 : 200

        initializeRecoClient()
    }

    /* Action for low memory */
    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
    }

    /* Called when a partial response is received. */
    func onPartialResponseReceived(response: String) {
        dispatch_async(dispatch_get_main_queue()) {
            self.textOnScreen.appendFormat("%@\n", response)
            self.quoteText.text = response
        }
    }

    /* Called when a final response is received. */
    func onFinalResponseReceived(response: RecognitionResult) {
        let isFinalDictationMessage = recoMode == SpeechRecognitionMode_LongDictation &&
            (response.RecognitionStatus == RecognitionStatus_EndOfDictation ||
             response.RecognitionStatus == RecognitionStatus_DictationEndSilenceTimeout)

        if isMicrophoneReco && (recoMode == SpeechRecognitionMode_ShortPhrase || isFinalDictationMessage) {
            micClient.endMicAndRecognition()
        }

        if recoMode == SpeechRecognitionMode_ShortPhrase || isFinalDictationMessage {
            dispatch_async(dispatch_get_main_queue()) {
                self.startButton.enabled = true
            }
        }
    }

    func convertSpeechErrorToString(errorCode: Int) -> String {
        // The SpeechClientStatus_* constants come from the SDK header; comparing raw
        // values avoids guessing how the C enum is bridged into Swift. Adjust if your
        // SDK headers import the enum differently.
        switch errorCode {
        case Int(SpeechClientStatus_SecurityFailed.rawValue):          return "SpeechClientStatus_SecurityFailed"
        case Int(SpeechClientStatus_LoginFailed.rawValue):             return "SpeechClientStatus_LoginFailed"
        case Int(SpeechClientStatus_Timeout.rawValue):                 return "SpeechClientStatus_Timeout"
        case Int(SpeechClientStatus_ConnectionFailed.rawValue):        return "SpeechClientStatus_ConnectionFailed"
        case Int(SpeechClientStatus_NameNotFound.rawValue):            return "SpeechClientStatus_NameNotFound"
        case Int(SpeechClientStatus_InvalidService.rawValue):          return "SpeechClientStatus_InvalidService"
        case Int(SpeechClientStatus_InvalidProxy.rawValue):            return "SpeechClientStatus_InvalidProxy"
        case Int(SpeechClientStatus_BadResponse.rawValue):             return "SpeechClientStatus_BadResponse"
        case Int(SpeechClientStatus_InternalError.rawValue):           return "SpeechClientStatus_InternalError"
        case Int(SpeechClientStatus_AuthenticationError.rawValue):     return "SpeechClientStatus_AuthenticationError"
        case Int(SpeechClientStatus_AuthenticationExpired.rawValue):   return "SpeechClientStatus_AuthenticationExpired"
        case Int(SpeechClientStatus_LimitsExceeded.rawValue):          return "SpeechClientStatus_LimitsExceeded"
        case Int(SpeechClientStatus_AudioOutputFailed.rawValue):       return "SpeechClientStatus_AudioOutputFailed"
        case Int(SpeechClientStatus_MicrophoneInUse.rawValue):         return "SpeechClientStatus_MicrophoneInUse"
        case Int(SpeechClientStatus_MicrophoneUnavailable.rawValue):   return "SpeechClientStatus_MicrophoneUnavailable"
        case Int(SpeechClientStatus_MicrophoneStatusUnknown.rawValue): return "SpeechClientStatus_MicrophoneStatusUnknown"
        case Int(SpeechClientStatus_InvalidArgument.rawValue):         return "SpeechClientStatus_InvalidArgument"
        default:                                                       return String(format: "Unknown error: %d\n", errorCode)
        }
    }

    /* Called when an error is received. */
    func onError(errorMessage: String, withErrorCode errorCode: Int) {
        dispatch_async(dispatch_get_main_queue()) {
            self.startButton.enabled = true
            self.textOnScreen.appendString("********* Error Detected *********\n")
            self.textOnScreen.appendFormat("%@ %@\n", errorMessage, self.convertSpeechErrorToString(errorCode))
            self.quoteText.text = self.textOnScreen as String
        }
    }

    /* Event fired when the microphone recording status has changed. */
    func onMicrophoneStatus(recording: Boolean) {
        if !recording {
            micClient.endMicAndRecognition()
        }

        dispatch_async(dispatch_get_main_queue()) {
            if !recording {
                self.startButton.enabled = true
            }
            self.quoteText.text = self.textOnScreen as String
        }
    }

    /* Take enum value and produce String. */
    func convertSpeechRecoConfidenceEnumToString(confidence: Confidence) -> String {
        switch confidence {
        case SpeechRecoConfidence_None:   return "None"
        case SpeechRecoConfidence_Low:    return "Low"
        case SpeechRecoConfidence_Normal: return "Normal"
        case SpeechRecoConfidence_High:   return "High"
        default:                          return "Unknown"
        }
    }

    /* Create a recognition request to interact with the Speech Recognition Service. */
    func initializeRecoClient() {
        let language = "en-us"

        let path = NSBundle.mainBundle().pathForResource("settings", ofType: "plist")!
        let settings = NSDictionary(contentsOfFile: path)!

        let primaryOrSecondaryKey = settings["primaryKey"] as! String
        let luisAppID = settings["luisAppID"] as! String
        let luisSubscriptionID = settings["luisSubscriptionID"] as! String

        if isMicrophoneReco {
            if !isIntent {
                micClient = SpeechRecognitionServiceFactory.createMicrophoneClient(recoMode,
                    withLanguage: language,
                    withKey: primaryOrSecondaryKey,
                    withProtocol: self)
            } else {
                let micIntentClient = SpeechRecognitionServiceFactory.createMicrophoneClientWithIntent(language,
                    withKey: primaryOrSecondaryKey,
                    withLUISAppID: luisAppID,
                    withLUISSecret: luisSubscriptionID,
                    withProtocol: self)
                micClient = micIntentClient
            }
        } else {
            if !isIntent {
                dataClient = SpeechRecognitionServiceFactory.createDataClient(recoMode,
                    withLanguage: language,
                    withKey: primaryOrSecondaryKey,
                    withProtocol: self)
            } else {
                let dataIntentClient = SpeechRecognitionServiceFactory.createDataClientWithIntent(language,
                    withKey: primaryOrSecondaryKey,
                    withLUISAppID: luisAppID,
                    withLUISSecret: luisSubscriptionID,
                    withProtocol: self)
                dataClient = dataIntentClient
            }
        }
    }
}
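Note that initializeRecoClient still expects a settings.plist in the app bundle containing the primaryKey, luisAppID and luisSubscriptionID entries, and the startButton / quoteText outlets and the startButtonTapped: action need to be connected to the new Swift ViewController in the storyboard.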