
Speech macOS xcode26.0 b1


# Speech.framework

diff -ruN /Applications/Xcode_16.4.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFErrors.h /Applications/Xcode_26.0.0-beta.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFErrors.h
--- /Applications/Xcode_16.4.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFErrors.h	2025-04-19 02:55:57
+++ /Applications/Xcode_26.0.0-beta.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFErrors.h	2025-05-24 04:29:46
@@ -13,22 +13,29 @@
 extern NSErrorDomain const SFSpeechErrorDomain
 API_AVAILABLE(macos(14), ios(17), tvos(18));
 
+/**
+ Error codes that can be thrown under the Speech framework's error domain.
+ */
 typedef NS_ERROR_ENUM (SFSpeechErrorDomain, SFSpeechErrorCode) {
-    /** Error may include `NSUnderlyingErrorKey` in `userInfo`.*/
+    /// There was an internal error.
     SFSpeechErrorCodeInternalServiceError = 1,
-    /** Failed to read audio file */
+
+    /// The audio file could not be read.
     SFSpeechErrorCodeAudioReadFailed = 2,
 
     // MARK: CustomLM data related errors
     
-    /** Templates were malformed */
+    /// The custom language model templates were malformed.
     SFSpeechErrorCodeUndefinedTemplateClassName = 7,
     
-    /** A custom language model file was malformed */
+    /// The custom language model file was malformed.
     SFSpeechErrorCodeMalformedSupplementalModel = 8,
     
-    /** Operation timed out */
-    SFSpeechErrorCodeTimeout = 10,
+    /// The operation timed out.
+    SFSpeechErrorCodeTimeout = 12,
+
+    /// A required parameter is missing/nil.
+    SFSpeechErrorCodeMissingParameter = 13,
 } API_AVAILABLE(macos(14), ios(17), tvos(18));
 
 NS_ASSUME_NONNULL_END
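
The beta renumbers `SFSpeechErrorCodeTimeout` (now 12) and adds `SFSpeechErrorCodeMissingParameter` (13). Below is a minimal sketch of checking for these codes from Swift; it assumes the usual projection of the `NS_ERROR_ENUM` as `SFSpeechError` / `SFSpeechError.Code`, so verify the names against the generated interface.

```swift
import Speech

func handleSpeechError(_ error: Error) {
    // Assumed Swift projection of SFSpeechErrorDomain / SFSpeechErrorCode.
    guard let speechError = error as? SFSpeechError else {
        print("Non-Speech error: \(error)")
        return
    }

    switch speechError.code {
    case .timeout:
        // Raw value 12 in this beta (was 10).
        print("Speech request timed out.")
    case .missingParameter:
        // New in this beta: a required parameter was missing or nil.
        print("A required parameter was missing from the request.")
    default:
        print("Speech error: \(speechError.localizedDescription)")
    }
}
```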
diff -ruN /Applications/Xcode_16.4.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechLanguageModel.h /Applications/Xcode_26.0.0-beta.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechLanguageModel.h
--- /Applications/Xcode_16.4.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechLanguageModel.h	2025-04-19 02:55:57
+++ /Applications/Xcode_26.0.0-beta.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechLanguageModel.h	2025-05-24 04:29:46
@@ -10,25 +10,66 @@
 
 NS_ASSUME_NONNULL_BEGIN
 
+/**
+ An object describing the location of a custom language model and specialized vocabulary.
+ 
+ Pass this object to ``SFSpeechLanguageModel/prepareCustomLanguageModelForUrl:configuration:completion:`` to indicate where that method should create the custom language model file, and to ``SFSpeechRecognitionRequest/customizedLanguageModel`` or ``DictationTranscriber/ContentHint/customizedLanguage(modelConfiguration:)`` to indicate where the system should find that model to use.
+ */
 API_AVAILABLE(ios(17), macos(14), tvos(18))
 NS_SWIFT_SENDABLE
 NS_SWIFT_NAME(SFSpeechLanguageModel.Configuration)
-@interface SFSpeechLanguageModelConfiguration : NSObject <NSCopying>
+@interface SFSpeechLanguageModelConfiguration : NSObject <NSCopying, NSSecureCoding>
 
+/** The location of a compiled language model file. */
 @property (nonatomic, readonly, copy) NSURL *languageModel;
+
+/** The location of a compiled vocabulary file. */
 @property (nonatomic, readonly, nullable, copy) NSURL *vocabulary;
 
+/** Creates a configuration with the location of a language model file. */
 - (instancetype)initWithLanguageModel:(NSURL *)languageModel;
+
+/** Creates a configuration with the locations of language model and vocabulary files. */
 - (instancetype)initWithLanguageModel:(NSURL *)languageModel vocabulary:(NSURL * __nullable)vocabulary;
 
 @end
 
+/**
+ A language model built from custom training data.
+ 
+ Create this object using ``SFSpeechLanguageModel/prepareCustomLanguageModelForUrl:configuration:completion:`` or ``SFSpeechLanguageModel/prepareCustomLanguageModelForUrl:configuration:ignoresCache:completion:``.
+ */
 API_AVAILABLE(ios(17), macos(14), tvos(18))
 @interface SFSpeechLanguageModel : NSObject
   
-+ (void)prepareCustomLanguageModelForUrl:(NSURL *)asset clientIdentifier:(NSString *)clientIdentifier configuration:(SFSpeechLanguageModelConfiguration *)configuration completion:(void(^)(NSError * __nullable error))completion;
++ (void)prepareCustomLanguageModelForUrl:(NSURL *)asset clientIdentifier:(NSString *)clientIdentifier configuration:(SFSpeechLanguageModelConfiguration *)configuration completion:(void(^)(NSError * __nullable error))completion
+API_DEPRECATED_WITH_REPLACEMENT("+prepareCustomLanguageModelForUrl:configuration:completion:",
+                                macos(14, 26.0), ios(17, 26.0), watchos(10, 26.0), tvos(18, 26.0));
 
-+ (void)prepareCustomLanguageModelForUrl:(NSURL *)asset clientIdentifier:(NSString *)clientIdentifier configuration:(SFSpeechLanguageModelConfiguration *)configuration ignoresCache:(BOOL)ignoresCache completion:(void(^)(NSError * __nullable error))completion;
++ (void)prepareCustomLanguageModelForUrl:(NSURL *)asset clientIdentifier:(NSString *)clientIdentifier configuration:(SFSpeechLanguageModelConfiguration *)configuration ignoresCache:(BOOL)ignoresCache completion:(void(^)(NSError * __nullable error))completion
+API_DEPRECATED_WITH_REPLACEMENT("+prepareCustomLanguageModelForUrl:configuration:ignoresCache:completion:",
+                                macos(14, 26.0), ios(17, 26.0), watchos(10, 26.0), tvos(18, 26.0));
+
+/**
+ Creates a language model from custom training data.
+ 
+ - Parameters:
+    - asset: The URL of a file containing custom training data. Create this file with ``SFCustomLanguageModelData/export(to:)``.
+    - configuration: An object listing the URLs at which this method should create the language model and compiled vocabulary from the training data.
+    - completion: Called when the language model has been created.
+ */
++ (void)prepareCustomLanguageModelForUrl:(NSURL *)asset configuration:(SFSpeechLanguageModelConfiguration *)configuration completion:(void(^)(NSError * __nullable error))completion;
+
+/**
+ Creates a language model from custom training data.
+ 
+ - Parameters:
+    - asset: The URL of a file containing custom training data. Create this file with ``SFCustomLanguageModelData/export(to:)``.
+    - configuration: An object listing the URLs at which this method should create the language model and compiled vocabulary from the training data.
+    - ignoresCache: If `true`, the language model identified by the configuration will be recreated even if the `asset` file is unchanged.
+    - completion: Called when the language model has been created.
+ */
++ (void)prepareCustomLanguageModelForUrl:(NSURL *)asset configuration:(SFSpeechLanguageModelConfiguration *)configuration ignoresCache:(BOOL)ignoresCache completion:(void(^)(NSError * __nullable error))completion;
 
 @end
 
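With the `clientIdentifier:` variants deprecated, preparing a custom language model goes through the new overloads. A sketch under two assumptions: the Swift name is the expected projection of `+prepareCustomLanguageModelForUrl:configuration:completion:` (check the generated interface), and the file paths are placeholders.

```swift
import Speech

// Placeholder locations; substitute real file URLs.
let trainingDataURL = URL(fileURLWithPath: "/path/to/CustomLMData.bin")
let modelURL = URL(fileURLWithPath: "/path/to/CustomLM.dat")

let configuration = SFSpeechLanguageModel.Configuration(languageModel: modelURL)

// Assumed Swift projection of the new selector (clientIdentifier dropped).
SFSpeechLanguageModel.prepareCustomLanguageModel(for: trainingDataURL,
                                                 configuration: configuration) { error in
    if let error {
        print("Failed to prepare custom language model: \(error)")
        return
    }

    // Point a recognition request at the prepared model.
    let request = SFSpeechURLRecognitionRequest(url: URL(fileURLWithPath: "/path/to/audio.m4a"))
    request.customizedLanguageModel = configuration
    request.requiresOnDeviceRecognition = true
}
```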
diff -ruN /Applications/Xcode_16.4.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionMetadata.h /Applications/Xcode_26.0.0-beta.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionMetadata.h
--- /Applications/Xcode_16.4.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionMetadata.h	2025-04-19 02:55:57
+++ /Applications/Xcode_26.0.0-beta.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionMetadata.h	2025-05-24 04:29:46
@@ -10,21 +10,35 @@
 
 @class SFVoiceAnalytics;
 
+/**
+ The metadata of speech in the audio of a speech recognition request.
+ */
 API_AVAILABLE(ios(14.5), macos(11.3), tvos(18))
 @interface SFSpeechRecognitionMetadata : NSObject <NSCopying, NSSecureCoding>
 
-// Measures the number of words spoken per minute
+/**
+ The number of words spoken per minute.
+ */
 @property (nonatomic, readonly) double speakingRate;
 
-// Measures average pause between words (in seconds)
+/**
+ The average pause duration between words, measured in seconds.
+ */
 @property (nonatomic, readonly) NSTimeInterval averagePauseDuration;
 
-// Timestamp of start of speech in audio
+/**
+ The start timestamp of speech in the audio.
+ */
 @property (nonatomic, readonly) NSTimeInterval speechStartTimestamp;
 
-// Duration of speech in audio
+/**
+ The duration in seconds of speech in the audio.
+ */
 @property (nonatomic, readonly) NSTimeInterval speechDuration;
 
+/**
+ An analysis of the transcription segment's vocal properties.
+ */
 @property (nonatomic, nullable, readonly) SFVoiceAnalytics *voiceAnalytics;
 
 @end
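
Once a result is final, the metadata above can be read straight off the result object. A small sketch, assuming a final `SFSpeechRecognitionResult` is already in hand:

```swift
import Speech

func logMetadata(for result: SFSpeechRecognitionResult) {
    guard result.isFinal, let metadata = result.speechRecognitionMetadata else { return }

    print("Speaking rate: \(metadata.speakingRate) words/minute")
    print("Average pause: \(metadata.averagePauseDuration) s")
    print("Speech runs from \(metadata.speechStartTimestamp) s for \(metadata.speechDuration) s")

    if let analytics = metadata.voiceAnalytics {
        // Per-frame acoustic features (jitter, shimmer, pitch, voicing).
        print("Jitter frames: \(analytics.jitter.acousticFeatureValuePerFrame.count)")
    }
}
```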
diff -ruN /Applications/Xcode_16.4.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionRequest.h /Applications/Xcode_26.0.0-beta.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionRequest.h
--- /Applications/Xcode_16.4.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionRequest.h	2025-04-19 02:55:57
+++ /Applications/Xcode_26.0.0-beta.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionRequest.h	2025-05-29 01:59:17
@@ -12,60 +12,170 @@
 
 NS_ASSUME_NONNULL_BEGIN
 
-// A request for a speech recognition from an audio source
+/**
+ An abstract class that represents a request to recognize speech from an audio source.
+
+ Don't create ``SFSpeechRecognitionRequest`` objects directly. Create an ``SFSpeechURLRecognitionRequest`` or ``SFSpeechAudioBufferRecognitionRequest`` object instead. Use the properties of this class to configure various aspects of your request object before you start the speech recognition process. For example, use the ``shouldReportPartialResults`` property to specify whether you want partial results or only the final result of speech recognition.
+ */
 API_AVAILABLE(ios(10.0), macos(10.15), tvos(18))
 @interface SFSpeechRecognitionRequest : NSObject
 
+/**
+ A value that indicates the type of speech recognition being performed.
+
+ The default value of this property is ``SFSpeechRecognitionTaskHint/unspecified``. For a valid list of values, see ``SFSpeechRecognitionTaskHint``.
+ */
 @property (nonatomic) SFSpeechRecognitionTaskHint taskHint;
 
+/**
+ A Boolean value that indicates whether you want intermediate results returned for each utterance.
+
+ The default value of this property is `true`. If you want only final results (and you don't care about intermediate results), set this property to `false` to prevent the system from doing extra work.
+ */
 // If true, partial (non-final) results for each utterance will be reported.
-// Default is true
 @property (nonatomic) BOOL shouldReportPartialResults;
 
-// Phrases which should be recognized even if they are not in the system vocabulary
+/**
+ An array of phrases that should be recognized, even if they are not in the system vocabulary.
+
+ Use this property to specify short custom phrases that are unique to your app. You might include phrases with the names of characters, products, or places that are specific to your app. You might also include domain-specific terminology or unusual or made-up words. Assigning custom phrases to this property improves the likelihood of those phrases being recognized.
+
+ Keep phrases relatively brief, limiting them to one or two words whenever possible. Lengthy phrases are less likely to be recognized. In addition, try to limit each phrase to something the user can say without pausing.
+
+ Limit the total number of phrases to no more than 100.
+ */
 @property (nonatomic, copy) NSArray<NSString *> *contextualStrings;
 
-// String which can be used to identify the receiver by the developer
+/**
+ An identifier string that you use to describe the type of interaction associated with the speech recognition request.
+
+ If different parts of your app have different speech recognition needs, you can use this property to identify the part of your app that is making each request. For example, if one part of your app lets users speak phone numbers and another part lets users speak street addresses, consistently identifying the part of the app that makes a recognition request may help improve the accuracy of the results.
+ */
 @property (nonatomic, copy, nullable) NSString *interactionIdentifier NS_DEPRECATED(10_15, 12_0, 10_0, 15_0, "Not used anymore");
 
-// If true, speech recognition will not send any audio over the Internet
-// This will reduce accuracy but enables certain applications where it is
-// inappropriate to transmit user speech to a remote service.
-// Default is false
+/**
+ A Boolean value that determines whether a request must keep its audio data on the device.
+
+ Set this property to `true` to prevent an ``SFSpeechRecognitionRequest`` from sending audio over the network. However, on-device requests won't be as accurate.
+
+ > Note:
+ > The request only honors this setting if the ``SFSpeechRecognizer/supportsOnDeviceRecognition`` (``SFSpeechRecognizer``) property is also `true`.
+ */
 @property (nonatomic) BOOL requiresOnDeviceRecognition API_AVAILABLE(ios(13), macos(10.15), tvos(18));
 
-// If true, punctuations will be automatically included in the recognition results
+/**
+ A Boolean value that indicates whether to add punctuation to speech recognition results.
+
+ Set this property to `true` for the speech framework to automatically include punctuation in the recognition results. Punctuation includes a period or question mark at the end of a sentence, and a comma within a sentence.
+ */
 @property (nonatomic) BOOL addsPunctuation API_AVAILABLE(ios(16), macos(13), tvos(18));
 
 @property (nonatomic, copy, nullable) SFSpeechLanguageModelConfiguration *customizedLanguageModel API_AVAILABLE(ios(17), macos(14), tvos(18));
 
 @end
 
-// A request to recognize speech from a recorded audio file
+/**
+ A request to recognize speech in a recorded audio file.
+
+ Use this object to perform speech recognition on the contents of an audio file.
+
+ The following example shows a method that performs recognition on an audio file based on the user's default language and prints out the transcription.
+
+ Listing 1. Getting a speech recognizer and making a recognition request
+
+ ```swift
+ func recognizeFile(url: URL) {
+     // Create a speech recognizer associated with the user's default language.
+     guard let myRecognizer = SFSpeechRecognizer() else {
+         // The system doesn't support the user's default language.
+         return
+     }
+     
+     guard myRecognizer.isAvailable else {
+         // The recognizer isn't available.
+         return
+     }
+     
+     // Create and execute a speech recognition request for the audio file at the URL.
+     let request = SFSpeechURLRecognitionRequest(url: url)
+     myRecognizer.recognitionTask(with: request) { (result, error) in
+         guard let result else {
+             // Recognition failed, so check the error for details and handle it.
+             return
+         }
+         
+         // Print the speech transcription with the highest confidence that the
+         // system recognized.
+         if result.isFinal {
+             print(result.bestTranscription.formattedString)
+         }
+     }
+ }
+ ```
+ */
 API_AVAILABLE(ios(10.0), macos(10.15), tvos(18))
 @interface SFSpeechURLRecognitionRequest : SFSpeechRecognitionRequest
 
 - (instancetype)init NS_UNAVAILABLE;
 
-// Request to transcribe speech from an audio file from the given URL.
+/**
+ Creates a speech recognition request, initialized with the specified URL.
+
+ Use this method to create a request to recognize speech in a recorded audio file that resides at the specified URL. Pass the request to the recognizer's ``SFSpeechRecognizer/recognitionTask(with:delegate:)`` method to start recognition.
+ */
 - (instancetype)initWithURL:(NSURL *)URL NS_DESIGNATED_INITIALIZER;
 
+/**
+ The URL of the audio file.
+ */
 @property (nonatomic, readonly, copy) NSURL *URL;
 
 @end
 
-// A request to recognize speech from arbitrary audio buffers
+/**
+ A request to recognize speech from captured audio content, such as audio from the device's microphone.
+
+ Use an ``SFSpeechAudioBufferRecognitionRequest`` object to perform speech recognition on live audio, or on a set of existing audio buffers. For example, use this request object to route audio from a device's microphone to the speech recognizer.
+
+ The request object contains no audio initially. As you capture audio, call ``append(_:)`` or ``appendAudioSampleBuffer(_:)`` to add audio samples to the request object. The speech recognizer continuously analyzes the audio you appended, stopping only when you call the ``endAudio()`` method. You must call ``endAudio()`` explicitly to stop the speech recognition process.
+
+ For a complete example of how to use audio buffers with speech recognition, see [SpeakToMe: Using Speech Recognition with AVAudioEngine](https://developer.apple.com/library/archive/samplecode/SpeakToMe/Introduction/Intro.html#//apple_ref/doc/uid/TP40017110).
+ */
 API_AVAILABLE(ios(10.0), macos(10.15), tvos(18))
 @interface SFSpeechAudioBufferRecognitionRequest : SFSpeechRecognitionRequest
 
-// Preferred audio format for optimal speech recognition
+/**
+ The preferred audio format for optimal speech recognition.
+
+ Use the audio format in this property as a hint for optimal recording, but don't depend on the value remaining unchanged.
+ */
 @property (nonatomic, readonly) AVAudioFormat *nativeAudioFormat;
 
-// Append audio to the end of the recognition stream. Must currently be in native format.
+/**
+ Appends audio in the PCM format to the end of the recognition request.
+
+ The audio must be in a native format and uncompressed.
+
+ - Parameters:
+   - audioPCMBuffer: An audio buffer that contains audio in the PCM format.
+ */
 - (void)appendAudioPCMBuffer:(AVAudioPCMBuffer *)audioPCMBuffer;
+
+/**
+ Appends audio to the end of the recognition request.
+
+ The audio must be in a native format.
+
+ - Parameters:
+   - sampleBuffer: A buffer of audio.
+ */
 - (void)appendAudioSampleBuffer:(CMSampleBufferRef)sampleBuffer;
 
-// Indicate that the audio source is finished and no more audio will be appended
+/**
+ Marks the end of audio input for the recognition request.
+
+ Call this method explicitly to let the speech recognizer know that no more audio input is coming.
+ */
 - (void)endAudio;
 
 @end
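
A condensed sketch of the live-audio flow described for `SFSpeechAudioBufferRecognitionRequest`: install a tap on the `AVAudioEngine` input node, append buffers as they arrive, and call `endAudio()` once capture stops. The recognizer and engine are assumed to be supplied by the caller.

```swift
import AVFoundation
import Speech

func startLiveRecognition(using recognizer: SFSpeechRecognizer,
                          audioEngine: AVAudioEngine) throws -> SFSpeechRecognitionTask {
    let request = SFSpeechAudioBufferRecognitionRequest()
    request.shouldReportPartialResults = true

    let inputNode = audioEngine.inputNode
    let format = inputNode.outputFormat(forBus: 0)
    inputNode.installTap(onBus: 0, bufferSize: 1024, format: format) { buffer, _ in
        // Feed captured audio into the request as it arrives.
        request.append(buffer)
    }

    audioEngine.prepare()
    try audioEngine.start()

    // When capture finishes, call inputNode.removeTap(onBus: 0) and request.endAudio()
    // so the recognizer can deliver its final result.
    return recognizer.recognitionTask(with: request) { result, _ in
        if let result, result.isFinal {
            print(result.bestTranscription.formattedString)
        }
    }
}
```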
diff -ruN /Applications/Xcode_16.4.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionResult.h /Applications/Xcode_26.0.0-beta.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionResult.h
--- /Applications/Xcode_16.4.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionResult.h	2025-04-19 02:55:57
+++ /Applications/Xcode_26.0.0-beta.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionResult.h	2025-05-24 04:29:46
@@ -11,18 +11,40 @@
 @class SFTranscription;
 @class SFSpeechRecognitionMetadata;
 
-// A recognized utterance, corresponding to a segment of recorded audio with speech and containing one or more transcriptions hypotheses
+/**
+ An object that contains the partial or final results of a speech recognition request.
+
+ Use an `SFSpeechRecognitionResult` object to retrieve the results of a speech recognition request. You don't create these objects directly. Instead, the Speech framework creates them and passes them to the handler block or delegate object you specified when starting your speech recognition task.
+
+ A speech recognition result object contains one or more ``transcriptions`` of the current utterance. Each transcription has a confidence rating indicating how likely it is to be correct. You can also get the transcription with the highest rating directly from the ``bestTranscription`` property.
+
+ If you requested partial results from the speech recognizer, the transcriptions may represent only part of the total audio content. Use the ``isFinal`` property to determine if the request contains partial or final results.
+ */
 API_AVAILABLE(ios(10.0), macos(10.15), tvos(18))
 @interface SFSpeechRecognitionResult : NSObject <NSCopying, NSSecureCoding>
 
-@property (nonatomic, readonly, copy) SFTranscription *bestTranscription;
+/**
+ The transcription with the highest confidence level.
+ */
+@property (nonatomic, readonly, copy) SFTranscription *bestTranscription __attribute__((privacy_sensitive));
 
-// Hypotheses for possible transcriptions, sorted in descending order of confidence (more likely first)
-@property (nonatomic, readonly, copy) NSArray<SFTranscription *> *transcriptions;
+/**
+ An array of potential transcriptions, sorted in descending order of confidence.
 
-// True if the hypotheses will not change; speech processing is complete.
+ All transcriptions correspond to the same utterance, which can be a partial or final result of the overall request. The first transcription in the array has the highest confidence rating, followed by transcriptions with decreasing confidence ratings.
+ */
+@property (nonatomic, readonly, copy) NSArray<SFTranscription *> *transcriptions __attribute__((privacy_sensitive));
+
+/**
+ A Boolean value that indicates whether speech recognition is complete and whether the transcriptions are final.
+
+ When a speech recognition request is final, its transcriptions don't change.
+ */
 @property (nonatomic, readonly, getter=isFinal) BOOL final;
 
+/**
+ An object that contains the metadata results for a speech recognition request.
+ */
 @property (nonatomic, nullable, readonly) SFSpeechRecognitionMetadata *speechRecognitionMetadata API_AVAILABLE(ios(14.0), macos(11.0), tvos(18));
 
 @end
diff -ruN /Applications/Xcode_16.4.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionTask.h /Applications/Xcode_26.0.0-beta.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionTask.h
--- /Applications/Xcode_16.4.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionTask.h	2025-04-19 02:55:57
+++ /Applications/Xcode_26.0.0-beta.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionTask.h	2025-05-29 01:59:17
@@ -12,60 +12,177 @@
 @class SFSpeechRecognitionResult;
 @class SFTranscription;
 
+/**
+ The state of the task associated with the recognition request.
+ */
 typedef NS_ENUM(NSInteger, SFSpeechRecognitionTaskState) {
-    SFSpeechRecognitionTaskStateStarting = 0,       // Speech processing (potentially including recording) has not yet begun
-    SFSpeechRecognitionTaskStateRunning = 1,        // Speech processing (potentially including recording) is running
-    SFSpeechRecognitionTaskStateFinishing = 2,      // No more audio is being recorded, but more recognition results may arrive
-    SFSpeechRecognitionTaskStateCanceling = 3,      // No more recognition results will arrive, but recording may not have stopped yet
-    SFSpeechRecognitionTaskStateCompleted = 4,      // No more results will arrive, and recording is stopped.
+    /// Speech recognition (potentially including audio recording) has not yet started.
+    SFSpeechRecognitionTaskStateStarting = 0,
+
+    /// Speech recognition (potentially including audio recording) is in progress.
+    SFSpeechRecognitionTaskStateRunning = 1,
+
+    /// Audio recording has stopped, but delivery of recognition results may continue.
+    SFSpeechRecognitionTaskStateFinishing = 2,
+
+    /// Delivery of recognition results has finished, but audio recording may be ongoing.
+    SFSpeechRecognitionTaskStateCanceling = 3,
+    
+    /// Delivery of recognition requests has finished and audio recording has stopped.
+    SFSpeechRecognitionTaskStateCompleted = 4,
 } API_AVAILABLE(ios(10.0), macos(10.15), tvos(18));
 
+/**
+ A task object for monitoring the speech recognition progress.
+
+ Use an `SFSpeechRecognitionTask` object to determine the state of a speech recognition task, to cancel an ongoing task, or to signal the end of the task.
+
+ You don't create speech recognition task objects directly. Instead, you receive one of these objects after calling ``SFSpeechRecognizer/recognitionTask(with:resultHandler:)`` or ``SFSpeechRecognizer/recognitionTask(with:delegate:)`` on your ``SFSpeechRecognizer`` object.
+ */
 API_AVAILABLE(ios(10.0), macos(10.15), tvos(18))
 @interface SFSpeechRecognitionTask : NSObject
 
+/**
+ The current state of the speech recognition task.
+
+ Check the value of this property to get the state of the in-progress speech recognition session. For valid values, see ``SFSpeechRecognitionTaskState``.
+ */
 @property (nonatomic, readonly) SFSpeechRecognitionTaskState state;
 
-// True if recognition audio input has stopped
+/**
+ A Boolean value that indicates whether audio input has stopped.
+
+ By default, the value of this property is `false`.
+ */
 @property (nonatomic, readonly, getter=isFinishing) BOOL finishing;
-// Instructs the task to stop accepting new audio (e.g. stop recording) but complete processing on audio already buffered.
+
+/**
+ Stops accepting new audio and finishes processing on the audio input that has already been accepted.
+
+ For audio buffer–based recognition, recognition does not finish until this method is called, so be sure to call it when the audio source is exhausted.
+ */
 // This has no effect on URL-based recognition requests, which effectively buffer the entire file immediately.
 - (void)finish;
 
-// True if recognition has been cancelled
+/**
+ A Boolean value that indicates whether the speech recognition task was canceled.
+
+ By default, the value of this property is `false`.
+ */
 @property (nonatomic, readonly, getter=isCancelled) BOOL cancelled;
+
+/**
+ Cancels the current speech recognition task.
+
+ You can cancel recognition tasks for both prerecorded and live audio input. For example, you might cancel a task in response to a user action or because the recording was interrupted.
+
+ When canceling a task, be sure to release any resources associated with the task, such as the audio input resources you are using to capture audio samples.
+ */
 - (void)cancel;
 
-// Reports error that occurred during recognition, if applicable
+/**
+ An error object that specifies the error that occurred during a speech recognition task.
+
+ The system may return one of the errors listed in the table below.
+
+ | Error Code | Error Domain | Description |
+ |---|---|---|
+ | `102` | `kLSRErrorDomain` | Assets are not installed. |
+ | `201` | `kLSRErrorDomain` | Siri or Dictation is disabled. |
+ | `300` | `kLSRErrorDomain` | Failed to initialize recognizer. |
+ | `301` | `kLSRErrorDomain` | Request was canceled. |
+ | `203` | `kAFAssistantErrorDomain` | Failure occurred during speech recognition. |
+ | `1100` | `kAFAssistantErrorDomain` | Trying to start recognition while an earlier instance is still active. |
+ | `1101` | `kAFAssistantErrorDomain` | Connection to speech process was invalidated. |
+ | `1107` | `kAFAssistantErrorDomain` | Connection to speech process was interrupted. |
+ | `1110` | `kAFAssistantErrorDomain` | Failed to recognize any speech. |
+ | `1700` | `kAFAssistantErrorDomain` | Request is not authorized. |
+ */
 @property (nonatomic, readonly, copy, nullable) NSError *error;
 
 @end
 
-// Recognition result receiver, to be used for complex or multi-utterance speech recognition requests
+/**
+ A protocol with methods for managing multi-utterance speech recognition requests.
+
+ The methods of this protocol give you fine-grained control over the speech recognition process. Specifically, you use this protocol when you want to know the following:
+
+ - When the first utterances of speech occur in the audio.
+ - When the speech recognizer stops accepting audio.
+ - When the speech recognition process finishes or is canceled.
+ - When the speech recognizer generates a potential transcription.
+
+ Adopt the methods of this protocol in an object and pass that object in to the `delegate` parameter of ``SFSpeechRecognizer/recognitionTask(with:delegate:)`` when starting your speech recognition task.
+ */
 API_AVAILABLE(ios(10.0), macos(10.15), tvos(18))
 @protocol SFSpeechRecognitionTaskDelegate <NSObject>
 
 @optional
 
-// Called when the task first detects speech in the source audio
+/**
+ Tells the delegate when the task first detects speech in the source audio.
+
+ - Parameters:
+   - task: The speech recognition task (an ``SFSpeechRecognitionTask`` object) that represents the request.
+ */
 - (void)speechRecognitionDidDetectSpeech:(SFSpeechRecognitionTask *)task;
 
-// Called for all recognitions, including non-final hypothesis
+/**
+ Tells the delegate that a hypothesized transcription is available.
+
+ This method is called for all recognitions, including partial recognitions.
+ 
+ - Parameters:
+   - task: The speech recognition task (an ``SFSpeechRecognitionTask`` object) that represents the request.
+   - transcription: The hypothesized transcription in an ``SFTranscription`` object.
+ */
 - (void)speechRecognitionTask:(SFSpeechRecognitionTask *)task didHypothesizeTranscription:(SFTranscription *)transcription;
 
-// Called only for final recognitions of utterances. No more about the utterance will be reported
+/**
+ Tells the delegate when the final utterance is recognized.
+
+ When this method is called, the delegate should expect no further information about the utterance to be reported.
+
+ - Parameters:
+   - task: The speech recognition task (an ``SFSpeechRecognitionTask`` object) that represents the request.
+   - recognitionResult: A recognized utterance that contains one or more transcription hypotheses in an ``SFSpeechRecognitionResult`` object.
+ */
 - (void)speechRecognitionTask:(SFSpeechRecognitionTask *)task didFinishRecognition:(SFSpeechRecognitionResult *)recognitionResult;
 
-// Called when the task is no longer accepting new audio but may be finishing final processing
+/**
+ Tells the delegate when the task is no longer accepting new audio input, even if final processing is in progress.
+
+ - Parameters:
+   - task: The speech recognition task (an ``SFSpeechRecognitionTask`` object) that represents the request.
+ */
 - (void)speechRecognitionTaskFinishedReadingAudio:(SFSpeechRecognitionTask *)task;
 
-// Called when the task has been cancelled, either by client app, the user, or the system
+/**
+ Tells the delegate that the task has been canceled.
+
+ A speech recognition task can be canceled by the user, by your app, or by the system.
+
+ - Parameters:
+   - task: The speech recognition task (an ``SFSpeechRecognitionTask`` object) that represents the request.
+ */
 - (void)speechRecognitionTaskWasCancelled:(SFSpeechRecognitionTask *)task;
 
-// Called when recognition of all requested utterances is finished.
-// If successfully is false, the error property of the task will contain error information
+/**
+ Tells the delegate when the recognition of all requested utterances is finished.
+
+ - Parameters:
+   - task: The speech recognition task (an ``SFSpeechRecognitionTask`` object) that represents the request.
+   - successfully: A Boolean value that indicates whether the task was successful. When this parameter is `false`, use the ``SFSpeechRecognitionTask/error`` property of the task to get information about why the task was unsuccessful.
+ */
 - (void)speechRecognitionTask:(SFSpeechRecognitionTask *)task didFinishSuccessfully:(BOOL)successfully;
 
-// Returns amount of audio processed by the task
+/**
+ Tells the delegate how much audio has been processed by the task.
+ 
+ - Parameters:
+   - task: The speech recognition task (an ``SFSpeechRecognitionTask`` object) that represents the request.
+   - duration: The seconds of audio input that the recognizer has processed.
+ */
 - (void)speechRecognitionTask:(SFSpeechRecognitionTask *)task didProcessAudioDuration:(NSTimeInterval)duration API_AVAILABLE(ios(10.0), macos(10.15), tvos(18));
 
 @end
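
For the delegate-based flow above, a minimal conforming observer might look like the following; the method names are the standard Swift projections of the `SFSpeechRecognitionTaskDelegate` selectors.

```swift
import Speech

final class TranscriptionObserver: NSObject, SFSpeechRecognitionTaskDelegate {

    func speechRecognitionDidDetectSpeech(_ task: SFSpeechRecognitionTask) {
        print("Speech detected")
    }

    func speechRecognitionTask(_ task: SFSpeechRecognitionTask,
                               didHypothesizeTranscription transcription: SFTranscription) {
        // Called repeatedly, including for partial hypotheses.
        print("Hypothesis: \(transcription.formattedString)")
    }

    func speechRecognitionTask(_ task: SFSpeechRecognitionTask,
                               didFinishRecognition recognitionResult: SFSpeechRecognitionResult) {
        print("Final: \(recognitionResult.bestTranscription.formattedString)")
    }

    func speechRecognitionTask(_ task: SFSpeechRecognitionTask,
                               didFinishSuccessfully successfully: Bool) {
        if !successfully, let error = task.error {
            print("Recognition failed: \(error)")
        }
    }
}

// Keep a strong reference to the delegate while the task runs, since the task
// does not retain it:
// let task = recognizer.recognitionTask(with: request, delegate: observer)
```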
diff -ruN /Applications/Xcode_16.4.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionTaskHint.h /Applications/Xcode_26.0.0-beta.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionTaskHint.h
--- /Applications/Xcode_16.4.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionTaskHint.h	2025-04-19 02:55:57
+++ /Applications/Xcode_26.0.0-beta.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionTaskHint.h	2025-05-24 04:29:46
@@ -4,11 +4,27 @@
 
 #import <Foundation/Foundation.h>
 
-//  Hints on kind of speech recognition being performed
+/**
+ The type of task for which you are using speech recognition.
+ */
 typedef NS_ENUM(NSInteger, SFSpeechRecognitionTaskHint) {
-    SFSpeechRecognitionTaskHintUnspecified = 0,     // Unspecified recognition
+    /// An unspecified type of task.
+    ///
+    /// Use this hint type when the intended use for captured speech does not match the other task types.
+    SFSpeechRecognitionTaskHintUnspecified = 0,
 
-    SFSpeechRecognitionTaskHintDictation = 1,       // General dictation/keyboard-style
-    SFSpeechRecognitionTaskHintSearch = 2,          // Search-style requests
-    SFSpeechRecognitionTaskHintConfirmation = 3,    // Short, confirmation-style requests ("Yes", "No", "Maybe")
+    /// A task that uses captured speech for text entry.
+    ///
+    /// Use this hint type when you are using speech recognition for a task that's similar to the keyboard's built-in dictation function.
+    SFSpeechRecognitionTaskHintDictation = 1,
+
+    /// A task that uses captured speech to specify search terms.
+    ///
+    /// Use this hint type when you are using speech recognition to identify search terms.
+    SFSpeechRecognitionTaskHintSearch = 2,
+
+    /// A task that uses captured speech for short, confirmation-style requests.
+    ///
+    /// Use this hint type when you are using speech recognition to handle confirmation commands, such as "yes," "no," or "maybe."
+    SFSpeechRecognitionTaskHintConfirmation = 3,
 } API_AVAILABLE(ios(10.0), macos(10.15), tvos(18));
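Applying a hint is a one-line request configuration; a brief sketch using an audio buffer request:

```swift
import Speech

let request = SFSpeechAudioBufferRecognitionRequest()
request.taskHint = .search          // the speech contains search terms
// request.taskHint = .dictation    // keyboard-style text entry
// request.taskHint = .confirmation // short "yes" / "no" / "maybe" replies
```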
diff -ruN /Applications/Xcode_16.4.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognizer.h /Applications/Xcode_26.0.0-beta.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognizer.h
--- /Applications/Xcode_16.4.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognizer.h	2025-04-19 02:55:57
+++ /Applications/Xcode_26.0.0-beta.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognizer.h	2025-05-24 04:29:46
@@ -19,63 +19,221 @@
 @protocol SFSpeechRecognizerDelegate;
 @protocol SFSpeechRecognitionTaskDelegate;
 
+/**
+ The app's authorization to perform speech recognition.
+ */
 typedef NS_ENUM(NSInteger, SFSpeechRecognizerAuthorizationStatus) {
+    /// The app's authorization status has not yet been determined.
+    ///
+    /// When your app's status is not determined, calling the ``SFSpeechRecognizer/requestAuthorization(_:)`` method prompts the user to grant or deny authorization.
     SFSpeechRecognizerAuthorizationStatusNotDetermined,
+    
+    /// The user denied your app's request to perform speech recognition.
     SFSpeechRecognizerAuthorizationStatusDenied,
+    
+    /// The device prevents your app from performing speech recognition.
     SFSpeechRecognizerAuthorizationStatusRestricted,
+
+    /// The user granted your app's request to perform speech recognition.
     SFSpeechRecognizerAuthorizationStatusAuthorized,
 } API_AVAILABLE(ios(10.0), macos(10.15), tvos(18));
 
+/**
+ An object you use to check for the availability of the speech recognition service, and to initiate the speech recognition process.
+
+ An ``SFSpeechRecognizer`` object is the central object for managing the speech recognizer process. Use this object to:
+
+ - Request authorization to use speech recognition services.
+ - Specify the language to use during the recognition process.
+ - Initiate new speech recognition tasks.
+
+ ### Set up speech recognition
+
+ Each speech recognizer supports only one language, which you specify at creation time. The successful creation of a speech recognizer does not guarantee that speech recognition services are available. For some languages, the recognizer might require an Internet connection. Use the ``isAvailable`` property to find out if speech recognition services are available for the current language.
+
+ To initiate the speech recognition process, do the following:
+
+ 1. Request authorization to use speech recognition. See <doc:asking-permission-to-use-speech-recognition>.
+ 2. Create an ``SFSpeechRecognizer`` object.
+ 3. Verify the availability of services using the ``isAvailable`` property of your speech recognizer object.
+ 4. Prepare your audio content.
+ 5. Create a recognition request object—an object that descends from ``SFSpeechRecognitionRequest``.
+ 6. Call the ``recognitionTask(with:delegate:)`` or ``recognitionTask(with:resultHandler:)`` method to begin the recognition process.
+
+ The type of recognition request object you create depends on whether you are processing an existing audio file or an incoming stream of audio. For existing audio files, create a ``SFSpeechURLRecognitionRequest`` object. For audio streams, create a ``SFSpeechAudioBufferRecognitionRequest`` object.
+
+ ### Create a great user experience for speech recognition
+
+ Here are some tips to consider when adding speech recognition support to your app.
+
+ - **Be prepared to handle failures caused by speech recognition limits.** Because speech recognition is a network-based service, limits are enforced so that the service can remain freely available to all apps. Individual devices may be limited in the number of recognitions that can be performed per day, and each app may be throttled globally based on the number of requests it makes per day. If a recognition request fails quickly (within a second or two of starting), check to see if the recognition service became unavailable. If it is, you may want to ask users to try again later.
+ - **Plan for a one-minute limit on audio duration.** Speech recognition places a relatively high burden on battery life and network usage. To minimize this burden, the framework stops speech recognition tasks that last longer than one minute. This limit is similar to the one for keyboard-related dictation.
+ - **Remind the user when your app is recording.** For example, display a visual indicator and play sounds at the beginning and end of speech recognition to help users understand that they're being actively recorded. You can also display speech as it is being recognized so that users understand what your app is doing and see any mistakes made during the recognition process.
+ - **Do not perform speech recognition on private or sensitive information.** Some speech is not appropriate for recognition. Don't send passwords, health or financial data, and other sensitive speech for recognition.
+ */
 API_AVAILABLE(ios(10.0), macos(10.15), tvos(18))
 @interface SFSpeechRecognizer : NSObject
 
-// Locales which support speech recognition.
-// Note that supported does not mean currently available; some locales may require an internet connection, for example.
+/**
+ Returns the set of locales that are supported by the speech recognizer.
+
+ This method returns the locales for which speech recognition is supported. Support for a locale does not guarantee that speech recognition is currently possible for that locale. For some locales, the speech recognizer requires an active Internet connection to communicate with Apple's servers. If the speech recognizer is currently unable to process requests,   ``isAvailable`` returns `false`.
+
+ Speech recognition supports the same locales that are supported by the keyboard's dictation feature. For a list of these locales, see [QuickType Keyboard: Dictation](https://www.apple.com/ios/feature-availability/#quicktype-keyboard-dictation).
+
+ - Returns: A set of locales that support speech recognition.
+ */
 + (NSSet<NSLocale *> *)supportedLocales;
 
+/**
+ Returns your app's current authorization to perform speech recognition.
+
+ The user can reject your app's request to perform speech recognition, but your request can also be denied if speech recognition is not supported on the device. The app can also change your app's authorization status at any time from the Settings app.
+
+ - Returns: The app's current authorization status value. For a list of values, see ``SFSpeechRecognizerAuthorizationStatus``.
+ */
 + (SFSpeechRecognizerAuthorizationStatus)authorizationStatus;
+
+/**
+ Asks the user to allow your app to perform speech recognition.
+
+ Call this method before performing any other tasks associated with speech recognition. This method executes asynchronously, returning shortly after you call it. At some point later, the system calls the provided `handler` block with the results.
+
+ When your app's authorization status is ``SFSpeechRecognizerAuthorizationStatus/notDetermined``, this method causes the system to prompt the user to grant or deny permission for your app to use speech recognition. The prompt includes the custom message you specify in the `NSSpeechRecognitionUsageDescription` key of your app's `Info.plist` file. The user's response is saved so that future calls to this method do not prompt the user again.
+
+ > Important:
+ > Your app's `Info.plist` file must contain the `NSSpeechRecognitionUsageDescription` key with a valid usage description. If this key is not present, your app will crash when you call this method.
+
+ For more information about requesting authorization, see <doc:asking-permission-to-use-speech-recognition>.
+
+ - Parameters:
+   - handler: The block to execute when your app's authorization status is known. The status parameter of the block contains your app's authorization status. The system does not guarantee the execution of this block on your app's main dispatch queue.
+ */
 + (void)requestAuthorization:(void(^)(SFSpeechRecognizerAuthorizationStatus status))handler;
 
 #pragma clang diagnostic push
 #pragma clang diagnostic ignored "-Wnullability"
-- (nullable instancetype)init; // Returns speech recognizer with user's current locale, or nil if is not supported
+/**
+ Creates a speech recognizer associated with the user's default language settings.
+
+ If the user's default language is not supported for speech recognition, this method attempts to fall back to the language used by the keyboard for dictation. If that fails, this method returns `nil`.
+
+ Even if this method returns a valid speech recognizer object, the speech recognition services may be temporarily unavailable. To determine whether speech recognition services are available, check the ``isAvailable`` property.
+
+ - Returns: An initialized speech recognizer object, or `nil` if there was a problem creating the object.
+ */
+- (nullable instancetype)init;
 #pragma clang diagnostic pop
 
-- (nullable instancetype)initWithLocale:(NSLocale *)locale NS_DESIGNATED_INITIALIZER; // returns nil if the locale is not supported
+/**
+ Creates a speech recognizer associated with the specified locale.
 
+ If you specify a language that is not supported by the speech recognizer, this method attempts to fall back to the language used by the keyboard for dictation. If that fails, this method returns `nil`.
+
+ Even if this method returns a valid speech recognizer object, the speech recognition services may be temporarily unavailable. To determine whether speech recognition services are available, check the ``isAvailable`` property.
+ 
+ - Parameters:
+   - locale: The locale object representing the language you want to use for speech recognition. For a list of languages supported by the speech recognizer, see ``supportedLocales()``.
+
+ - Returns: An initialized speech recognizer object, or `nil` if the specified language was not supported.
+ */
+- (nullable instancetype)initWithLocale:(NSLocale *)locale NS_DESIGNATED_INITIALIZER;
+
+/**
+ A Boolean value that indicates whether the speech recognizer is currently available.
+
+ When the value of this property is `true`, you may create new speech recognition tasks. When value of this property is `false`, speech recognition services are not available.
+ */
 @property (nonatomic, readonly, getter=isAvailable) BOOL available;
+
+/**
+ The locale of the speech recognizer.
+
+ The locale of the speech recognizer is an `NSLocale` object. The default value of this property is the system locale (that is, `+[NSLocale systemLocale]`).
+ */
 @property (nonatomic, readonly, copy) NSLocale *locale;
 
-// True if this recognition can handle requests with requiresOnDeviceRecognition set to true
+/**
+ A Boolean value that indicates whether the speech recognizer can operate without network access.
+
+ An ``SFSpeechRecognitionRequest`` can only honor its ``SFSpeechRecognitionRequest/requiresOnDeviceRecognition`` property if ``supportsOnDeviceRecognition`` is `true`. If ``supportsOnDeviceRecognition`` is `false`, the ``SFSpeechRecognizer`` requires a network in order to recognize speech.
+ */
 @property (nonatomic) BOOL supportsOnDeviceRecognition API_AVAILABLE(ios(13), tvos(18));
 
+/**
+ The delegate object that handles changes to the availability of speech recognition services.
+
+ Provide a delegate object when you want to monitor changes to the availability of speech recognition services. Your delegate object must conform to the ``SFSpeechRecognizerDelegate`` protocol.
+ */
 @property (nonatomic, weak) id<SFSpeechRecognizerDelegate> delegate;
 
-// Default task for requests, overrides SFSpeechRecognitionTaskHintUnspecified for requests
+/**
+ A hint that indicates the type of speech recognition being requested.
+
+ By default, the value of this property overrides the ``SFSpeechRecognitionTaskHint/unspecified`` value for requests. For possible values, see ``SFSpeechRecognitionTaskHint``.
+ */
 @property (nonatomic) SFSpeechRecognitionTaskHint defaultTaskHint;
 
-// Recognize speech utterance with a request
-// If request.shouldReportPartialResults is true, result handler will be called
-// repeatedly with partial results, then finally with a final result or an error.
+/**
+ Executes the speech recognition request and delivers the results to the specified handler block.
+
+ Use this method to initiate the speech recognition process on the audio contained in the request object. This method executes asynchronously and returns a ``SFSpeechRecognitionTask`` object that you can use to cancel or finalize the recognition process later. As results become available, the method calls the block in the `resultHandler` parameter.
+
+ - Parameters:
+   - request: A request (in an ``SFSpeechRecognitionRequest`` object) to recognize speech from an audio source.
+   - resultHandler: The block to call when partial or final results are available, or when an error occurs. If the ``SFSpeechRecognitionRequest/shouldReportPartialResults`` property is `true`, this block may be called multiple times to deliver the partial and final results. The block has no return value and takes the following parameters:
+
+     - term result: A ``SFSpeechRecognitionResult`` containing the partial or final transcriptions of the audio content.
+     - term error: An error object if a problem occurred. This parameter is `nil` if speech recognition was successful.
+
+ - Returns: The task object you can use to manage an in-progress recognition request.
+ */
 - (SFSpeechRecognitionTask *)recognitionTaskWithRequest:(SFSpeechRecognitionRequest *)request
                                           resultHandler:(void (^)(SFSpeechRecognitionResult * __nullable result, NSError * __nullable error))resultHandler;
 
-// Advanced API: Recognize a custom request with with a delegate
-// The delegate will be weakly referenced by the returned task
+/**
+ Recognizes speech from the audio source associated with the specified request, using the specified delegate to manage the results.
+
+ Use this method to initiate the speech recognition process on the audio contained in the request object. This method executes asynchronously and returns a ``SFSpeechRecognitionTask`` object that you can use to cancel or finalize the recognition process later. As results become available, the method calls the methods of the provided `delegate` object.
+
+ Note that the ``SFSpeechRecognitionTask`` object returned by this method does not retain your delegate object. You must maintain a strong reference to your delegate while speech recognition is in progress.
+
+ - Parameters:
+   - request: A request (encapsulated in an ``SFSpeechRecognitionRequest`` object) to recognize speech from an audio source.
+   - delegate: An object that can handle results from the speech recognition task. This object must conform to the ``SFSpeechRecognitionTaskDelegate`` protocol.
+
+ - Returns: The task object you can use to manage an in-progress recognition request.
+ */
 - (SFSpeechRecognitionTask *)recognitionTaskWithRequest:(SFSpeechRecognitionRequest *)request
                                                delegate:(id <SFSpeechRecognitionTaskDelegate>)delegate;
 
-// Queue used the recognizer for recognition task handlers and delegate messages
-// Defaults to the main queue
+/**
+ The queue on which to execute recognition task handlers and delegate methods.
+
+ The default value of this property is the app's main queue. Assign a different queue if you want delegate methods and handlers to be executed on a background queue.
+
+ The handler you pass to the ``requestAuthorization(_:)`` method does not use this queue.
+ */
 @property (nonatomic, strong) NSOperationQueue *queue;
 
 @end
 
+/**
+ A protocol that you adopt in your objects to track the availability of a speech recognizer.
+
+ A speech recognizer's availability can change due to the device's Internet connection or other factors. Use this protocol's optional method to track those changes and provide an appropriate response. For example, when speech recognition becomes unavailable, you might disable related features in your app.
+ */
 API_AVAILABLE(ios(10.0), macos(10.15), tvos(18))
 @protocol SFSpeechRecognizerDelegate <NSObject>
 @optional
 
-// Called when the availability of the given recognizer changes
+/**
+ Tells the delegate that the availability of its associated speech recognizer changed.
+
+ - Parameters:
+   - speechRecognizer: The ``SFSpeechRecognizer`` object whose availability changed.
+   - available: A Boolean value that indicates the new availability of the speech recognizer.
+ */
 - (void)speechRecognizer:(SFSpeechRecognizer *)speechRecognizer availabilityDidChange:(BOOL)available;
 
 @end
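
A condensed version of the setup flow in the class comment above: request authorization, create a recognizer for a locale, check availability, then start a task. The locale and file URL are placeholders.

```swift
import Speech

SFSpeechRecognizer.requestAuthorization { status in
    guard status == .authorized else {
        print("Speech recognition not authorized")
        return
    }

    guard let recognizer = SFSpeechRecognizer(locale: Locale(identifier: "en-US")),
          recognizer.isAvailable else {
        print("Recognizer unavailable for this locale")
        return
    }

    // Placeholder audio file URL.
    let request = SFSpeechURLRecognitionRequest(url: URL(fileURLWithPath: "/path/to/audio.m4a"))
    _ = recognizer.recognitionTask(with: request) { result, error in
        if let error {
            print("Recognition failed: \(error)")
        } else if let result, result.isFinal {
            print(result.bestTranscription.formattedString)
        }
    }
}
```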
diff -ruN /Applications/Xcode_16.4.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFTranscription.h /Applications/Xcode_26.0.0-beta.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFTranscription.h
--- /Applications/Xcode_16.4.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFTranscription.h	2025-04-19 02:55:57
+++ /Applications/Xcode_26.0.0-beta.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFTranscription.h	2025-05-24 04:29:46
@@ -9,19 +9,42 @@
 
 @class SFTranscriptionSegment;
 
-// A hypothesized text form of a speech recording
+/**
+ A textual representation of the specified speech in its entirety, as recognized by the speech recognizer.
+
+ Use `SFTranscription` to obtain all the recognized utterances from your audio content. An _utterance_ is a vocalized word or group of words that represent a single meaning to the speech recognizer (``SFSpeechRecognizer``).
+
+ Use the ``formattedString`` property to retrieve the entire transcription of utterances, or use the ``segments`` property to retrieve an individual utterance (``SFTranscriptionSegment``).
+
+ You don't create an `SFTranscription` directly. Instead, you retrieve it from an ``SFSpeechRecognitionResult`` instance. The speech recognizer sends a speech recognition result to your app in one of two ways, depending on how your app started a speech recognition task.
+
+ You can start a speech recognition task by using the speech recognizer's ``SFSpeechRecognizer/recognitionTask(with:resultHandler:)`` method. When the task is complete, the speech recognizer sends an ``SFSpeechRecognitionResult`` instance to your `resultHandler` closure. Alternatively, you can use the speech recognizer's ``SFSpeechRecognizer/recognitionTask(with:delegate:)`` method to start a speech recognition task. When the task is complete, the speech recognizer uses your ``SFSpeechRecognitionTaskDelegate`` to send an ``SFSpeechRecognitionResult`` by using the delegate's ``SFSpeechRecognitionTaskDelegate/speechRecognitionTask(_:didFinishRecognition:)`` method.
+
+ An `SFTranscription` represents only a potential version of the speech. It might not be an accurate representation of the utterances.
+ */
 API_AVAILABLE(ios(10.0), macos(10.15), tvos(18))
 @interface SFTranscription : NSObject <NSCopying, NSSecureCoding>
 
-// Contains the entire recognition, formatted into a single user-displayable string
-@property (nonatomic, readonly, copy) NSString *formattedString;
+/**
+ The entire transcription of utterances, formatted into a single, user-displayable string.
+ */
+@property (nonatomic, readonly, copy) NSString *formattedString __attribute__((privacy_sensitive));
 
+/**
+ An array of transcription segments that represent the parts of the transcription, as identified by the speech recognizer.
+
+ The order of the segments in the array matches the order in which the corresponding utterances occur in the spoken content.
+ */
 @property (nonatomic, readonly, copy) NSArray<SFTranscriptionSegment *> *segments;
 
-// Measures the number of words spoken per minute
+/**
+ The number of words spoken per minute.
+ */
 @property (nonatomic, readonly) double speakingRate NS_DEPRECATED(10_15, 11_3, 13_0, 14_5, "speakingRate is moved to SFSpeechRecognitionMetadata");
 
-// Measures average pause between words (in seconds)
+/**
+ The average pause duration between words, measured in seconds.
+ */
 @property (nonatomic, readonly) NSTimeInterval averagePauseDuration NS_DEPRECATED(10_15, 11_3, 13_0, 14_5, "averagePauseDuration is moved to SFSpeechRecognitionMetadata");
 
 @end
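
Walking the transcription structure described above, from the formatted string down to per-segment timing and confidence:

```swift
import Speech

func inspect(_ transcription: SFTranscription) {
    print("Transcription: \(transcription.formattedString)")

    for segment in transcription.segments {
        let start = segment.timestamp
        let end = segment.timestamp + segment.duration
        print("\"\(segment.substring)\" [\(start)s-\(end)s] confidence \(segment.confidence)")
    }
}
```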
diff -ruN /Applications/Xcode_16.4.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFTranscriptionSegment.h /Applications/Xcode_26.0.0-beta.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFTranscriptionSegment.h
--- /Applications/Xcode_16.4.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFTranscriptionSegment.h	2025-04-19 02:55:57
+++ /Applications/Xcode_26.0.0-beta.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFTranscriptionSegment.h	2025-05-24 04:29:46
@@ -9,23 +9,64 @@
 
 @class SFVoiceAnalytics;
 
-// Substrings of a hypothesized transcription
+/**
+ A discrete part of an entire transcription, as identified by the speech recognizer.
+
+ Use ``SFTranscriptionSegment`` to get details about a part of an overall ``SFTranscription``. An ``SFTranscriptionSegment`` represents an utterance, which is a vocalized word or group of words that represent a single meaning to the speech recognizer (``SFSpeechRecognizer``).
+
+ You don't create transcription object segments directly. Instead, you access them from a transcription's ``SFTranscription/segments`` property.
+
+ A transcription segment includes the following information:
+
+ - The text of the utterance, plus any alternative interpretations of the spoken word.
+ - The character range of the segment within the ``SFTranscription/formattedString`` of its parent ``SFTranscription``.
+ - A ``confidence`` value, indicating how likely it is that the specified string matches the audible speech.
+ - A ``timestamp`` and ``duration`` value, indicating the position of the segment within the provided audio stream.
+ */
 API_AVAILABLE(ios(10.0), macos(10.15), tvos(18))
 @interface SFTranscriptionSegment : NSObject <NSCopying, NSSecureCoding>
 
-@property (nonatomic, readonly, copy) NSString *substring;
+/**
+ The string representation of the utterance in the transcription segment.
+ */
+@property (nonatomic, readonly, copy) NSString *substring __attribute__((privacy_sensitive));
+
+/**
+ The range information for the transcription segment's substring, relative to the overall transcription.
+
+ Use the range information to find the position of the segment within the ``SFTranscription/formattedString`` property of the ``SFTranscription`` object containing this segment.
+ */
 @property (nonatomic, readonly) NSRange substringRange;
 
-// Relative to start of audio
+/**
+ The start time of the segment in the processed audio stream.
+
+ The ``timestamp`` is the number of seconds between the beginning of the audio content and when the user spoke the word represented by the segment. For example, if the user said the word "time" one second into the transcription "What time is it", the timestamp would be equal to `1.0`.
+ */
 @property (nonatomic, readonly) NSTimeInterval timestamp;
+
+/**
+ The number of seconds it took for the user to speak the utterance represented by the segment.
+
+ The ``duration`` contains the number of seconds it took for the user to speak the one or more words (utterance) represented by the segment. For example, the ``SFSpeechRecognizer`` sets ``duration`` to `0.6` if the user took `0.6` seconds to say `“time”` in the transcription of `“What time is it?”`.
+ */
 @property (nonatomic, readonly) NSTimeInterval duration;
 
-// Confidence in the accuracy of transcription. Scale is 0 (least confident) to 1.0 (most confident)
+/**
+ The level of confidence the speech recognizer has in its recognition of the speech transcribed for the segment.
+
+ This property reflects the overall confidence in the recognition of the entire phrase. The value is `0` if there was no recognition, and it is closer to `1` when there is a high certainty that a transcription matches the user's speech exactly. For example, a confidence value of `0.94` represents a very high confidence level, and is more likely to be correct than a transcription with a confidence value of `0.72`.
+ */
 @property (nonatomic, readonly) float confidence;
 
-// Other possible interpretations of this segment
-@property (nonatomic, readonly) NSArray<NSString *> *alternativeSubstrings;
+/**
+ An array of alternate interpretations of the utterance in the transcription segment.
+*/
+@property (nonatomic, readonly) NSArray<NSString *> *alternativeSubstrings __attribute__((privacy_sensitive));
 
+/**
+ An analysis of the transcription segment's vocal properties.
+ */
 @property (nonatomic, nullable, readonly) SFVoiceAnalytics *voiceAnalytics NS_DEPRECATED(10_15, 11_3, 13_0, 14_5, "voiceAnalytics is moved to SFSpeechRecognitionMetadata");
 
 @end
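
Each property documented above maps directly onto a field you can read off a segment. A hedged Swift sketch that prints the details listed in the new comments (text, range, timing, confidence, and alternatives), assuming a transcription obtained as in the previous example:

```swift
import Speech

// Sketch only: inspect the per-segment details of a transcription.
func describe(_ transcription: SFTranscription) {
    for segment in transcription.segments {
        // Position of the utterance inside the parent formattedString.
        let range = segment.substringRange

        print("""
        "\(segment.substring)" at characters \(range.location)..<\(range.location + range.length)
          starts at \(segment.timestamp) s, lasts \(segment.duration) s
          confidence \(segment.confidence)
          alternatives: \(segment.alternativeSubstrings)
        """)
    }
}
```
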
diff -ruN /Applications/Xcode_16.4.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFVoiceAnalytics.h /Applications/Xcode_26.0.0-beta.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFVoiceAnalytics.h
--- /Applications/Xcode_16.4.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFVoiceAnalytics.h	2025-04-19 02:55:57
+++ /Applications/Xcode_26.0.0-beta.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFVoiceAnalytics.h	2025-05-24 04:29:46
@@ -6,31 +6,62 @@
 
 NS_ASSUME_NONNULL_BEGIN
 
-// An acoustic feature
+/**
+ The value of a voice analysis metric.
+ */
 API_AVAILABLE(ios(13), macos(10.15), tvos(18))
 @interface SFAcousticFeature : NSObject <NSCopying, NSSecureCoding>
 
-// Array of feature values per audio frame, corresponding to a segment of recorded audio
+/**
+ An array of feature values, one value per audio frame, corresponding to a transcript segment of recorded audio.
+ */
 @property (nonatomic, readonly, copy) NSArray<NSNumber *> *acousticFeatureValuePerFrame;
 
-// Duration of an audio frame
+/**
+ The duration of the audio frame.
+ */
 @property (nonatomic, readonly) NSTimeInterval frameDuration;
 
 @end
 
-// Voice analytics corresponding to a segment of recorded audio
+/**
+ A collection of vocal analysis metrics.
+
+ Use an ``SFAcousticFeature`` object to access the `SFVoiceAnalytics` insights. Voice analytics include the following features:
+
+ - Use ``jitter`` to measure how pitch varies in audio.
+ - Use ``shimmer`` to measure how amplitude varies in audio.
+ - Use ``pitch`` to measure the highness and lowness of the tone.
+ - Use ``voicing`` to identify voiced regions in speech.
+
+ These results are part of the ``SFTranscriptionSegment`` object and are available when the system sends the ``SFSpeechRecognitionResult/isFinal`` flag.
+ */
 API_AVAILABLE(ios(13), macos(10.15), tvos(18))
 @interface SFVoiceAnalytics : NSObject <NSCopying, NSSecureCoding>
 
+/**
+ The variation in pitch in each frame of a transcription segment, expressed as a percentage of the frame's fundamental frequency.
+ */
 // Jitter measures vocal stability and is measured as an absolute difference between consecutive periods, divided by the average period. It is expressed as a percentage
 @property (nonatomic, readonly, copy) SFAcousticFeature *jitter;
 
-// Shimmer measures vocal stability and is measured in decibels
+/**
+ The variation in vocal volume stability (amplitude) in each frame of a transcription segment, expressed in decibels.
+ */
 @property (nonatomic, readonly, copy) SFAcousticFeature *shimmer;
 
-// Pitch measures the highness and lowness of tone and is measured in logarithm of normalized pitch estimates
+/**
+ The highness or lowness of the tone (fundamental frequency) in each frame of a transcription segment, expressed as a logarithm.
+
+ The value is a logarithm (base `e`) of the normalized pitch estimate for each frame.
+*/
 @property (nonatomic, readonly, copy) SFAcousticFeature *pitch;
 
+/**
+ The likelihood of a voice in each frame of a transcription segment.
+
+ The `voicing` value is expressed as a probability in the range `[0.0, 1.0]`.
+ */
 // Voicing measures the probability of whether a frame is voiced or not and is measured as a probability
 @property (nonatomic, readonly, copy) SFAcousticFeature *voicing;
 