CameraX - Unable to get camera ID for use case androidx.camera.core.Preview-4817149b-004d-42b8-a103-ea998038268b - java.lang.IllegalArgumentException - androidx

While implementing the code from the Google codelab (CameraX - Google Code Labs), I am getting this crash report when starting the CameraActivity.
Logs:
Process: in.novopay.novoloan, PID: 5845
java.lang.IllegalArgumentException: Unable to get camera ID for use case
androidx.camera.core.Preview-4817149b-004d-42b8-a103-ea998038268b
at androidx.camera.camera2.impl.Camera2DeviceSurfaceManager.getCameraIdFromConfig(Camera2DeviceSurfaceManager.java:310)
at androidx.camera.camera2.impl.Camera2DeviceSurfaceManager.requiresCorrectedAspectRatio(Camera2DeviceSurfaceManager.java:268)
at androidx.camera.core.Preview.updateUseCaseConfig(Preview.java:387)
at androidx.camera.core.UseCase.<init>(UseCase.java:92)
at androidx.camera.core.Preview.<init>(Preview.java:99)
at in.novopay.uicontrollibrary.activities.CameraActivity.startCamera(CameraActivity.kt:94)
at in.novopay.uicontrollibrary.activities.CameraActivity.access$startCamera(CameraActivity.kt:30)
at in.novopay.uicontrollibrary.activities.CameraActivity$checkPermission$1.run(CameraActivity.kt:45)
at android.os.Handler.handleCallback(Handler.java:790)
at android.os.Handler.dispatchMessage(Handler.java:99)
at android.os.Looper.loop(Looper.java:164)
at android.app.ActivityThread.main(ActivityThread.java:7000)
at java.lang.reflect.Method.invoke(Native Method)
at com.android.internal.os.RuntimeInit$MethodAndArgsCaller.run(RuntimeInit.java:441)
at com.android.internal.os.ZygoteInit.main(ZygoteInit.java:1408)
Caused by: java.lang.IllegalArgumentException: Option does not exist: Option{id=camerax.core.camera.lensFacing, valueClass=class
androidx.camera.core.CameraX$LensFacing, token=null}
// This is an arbitrary number we are using to keep tab of the permission
// request. Where an app has multiple context for requesting permission,
// this can help differentiate the different contexts
private const val REQUEST_CODE_PERMISSIONS = 10
// This is an array of all the permission specified in the manifest
private val REQUIRED_PERMISSIONS = arrayOf(Manifest.permission.CAMERA)
class CameraActivity : AppCompatActivity(), LifecycleOwner {
lateinit var viewFinder: TextureView
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
setContentView(R.layout.activity_camera)
viewFinder = findViewById(R.id.texture_view)
// startCamera()
checkPermission();
}
fun checkPermission() {
if (allPermissionsGranted()) {
viewFinder.post { startCamera() }
} else {
ActivityCompat.requestPermissions(
this, REQUIRED_PERMISSIONS, REQUEST_CODE_PERMISSIONS)
}
// Every time the provided texture view changes, recompute layout
viewFinder.addOnLayoutChangeListener { _, _, _, _, _, _, _, _, _ ->
updateTransform()
}
}
/**
* Process result from permission request dialog box, has the request
* been granted? If yes, start Camera. Otherwise display a toast
*/
override fun onRequestPermissionsResult(requestCode: Int, permissions: Array<String>, grantResults: IntArray) {
if (requestCode == REQUEST_CODE_PERMISSIONS) {
if (allPermissionsGranted()) {
viewFinder.post {
startCamera()
}
} else {
Toast.makeText(this,
"Permissions not granted by the user.",
Toast.LENGTH_SHORT).show()
finish()
}
}
}
/**
* Check if all permission specified in the manifest have been granted
*/
private fun allPermissionsGranted() = REQUIRED_PERMISSIONS.all {
ContextCompat.checkSelfPermission(
baseContext, it) == PackageManager.PERMISSION_GRANTED
}
private fun startCamera() {
// Create configuration object for the viewfinder use case
val previewConfig = PreviewConfig.Builder().apply {
setTargetAspectRatio(Rational(1, 1))
setTargetResolution(Size(640, 640))
}.build()
// Build the viewfinder use case
val preview = Preview(previewConfig)
// Every time the viewfinder is updated, recompute layout
preview.setOnPreviewOutputUpdateListener {
// To update the SurfaceTexture, we have to remove it and re-add it
val parent = viewFinder.parent as ViewGroup
parent.removeView(viewFinder)
parent.addView(viewFinder, 0)
viewFinder.surfaceTexture = it.surfaceTexture
updateTransform()
}
// Bind use cases to lifecycle
// If Android Studio complains about "this" being not a LifecycleOwner
// try rebuilding the project or updating the appcompat dependency to
// version 1.1.0 or higher.
CameraX.bindToLifecycle(this, preview)
}
private fun updateTransform() {
val matrix = Matrix()
// Compute the center of the view finder
val centerX = viewFinder.width / 2f
val centerY = viewFinder.height / 2f
// Correct preview output to account for display rotation
val rotationDegrees = when(viewFinder.display.rotation) {
Surface.ROTATION_0 -> 0
Surface.ROTATION_90 -> 90
Surface.ROTATION_180 -> 180
Surface.ROTATION_270 -> 270
else -> return
}
matrix.postRotate(-rotationDegrees.toFloat(), centerX, centerY)
// Finally, apply transformations to our TextureView
viewFinder.setTransform(matrix)
}
}
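
The Caused by section of the stack trace points at the problem: the PreviewConfig being built contains no lens-facing option (Option does not exist: camerax.core.camera.lensFacing). Specifying the lens facing when building the PreviewConfig resolves the crash: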

val previewConfig = PreviewConfig.Builder().apply {
setTargetAspectRatio(Rational(1,1))
setTargetResolution(Size(640,640))
setLensFacing(androidx.camera.core.CameraX.LensFacing.BACK)
}.build()

Related

SwiftUI - share dictionary among views, unclear what arguments to use at @main / WindowGroup

I'm trying to build an app (macOS, but it would be the same for iOS) that creates a number of grids, the outcome of which is to be shown in a second screen. For this, I'm sharing data across these screens, and I'm running into an issue here; I hope someone can help or point me in the right direction. I'll share a simplified version of the code below (working in Xcode 14.0.1).
The code creates a dictionary that can be shown in a grid, on which calculations can be done. The idea is then to add this grid, with some descriptive variables, into another dictionary.
The building blocks of the grid are cells:
import Foundation
struct Cell: Comparable, Equatable, Identifiable, Hashable {
static func == (lhs: Cell, rhs: Cell) -> Bool {
lhs.randomVarOne == rhs.randomVarOne
}
var randomVarOne: Double
var randomVarTwo: Bool
// other vars omitted here
var id: Int { randomVarOne }
static func < (lhs: Cell, rhs: Cell) -> Bool {
return lhs.randomVarOne < rhs.randomVarOne
}
}
This is also where there are a bunch of funcs to calculate next-neighbor cells in the grid, etc.
Then the grid is defined in a class:
class Info: ObservableObject, Hashable {
static func == (lhs: Info, rhs: Info) -> Bool {
lhs.grid == rhs.grid
}
func hash(into hasher: inout Hasher) {
hasher.combine(grid)
}
@Published var grid = [Cell]()
var arrayTotal = 900
@Published var toBeUsedForTheGridCalculations: Double = 0.0
var toBeUsedToSetTheVarAbove: Double = 0.0
var rowTotalDouble: Double {sqrt(Double(arrayTotal)) }
var rowTotal: Int {
Int(rowTotalDouble) != 0 ? Int(rowTotalDouble) : 10 }
The class includes a func to create and populate the grid with Cells and add these Cells to the grid var. It also includes the formulas to do the calculations on the grid using a user input. The class did not seem to need an initializer.
This is the Scenario struct:
struct Scenario: Comparable, Equatable, Identifiable, Hashable {
static func == (lhs: Scenario, rhs: Scenario) -> Bool {
lhs.scenarioNumber == rhs.scenarioNumber
}
func hash(into hasher: inout Hasher) {
hasher.combine(scenarioNumber)
}
var scenarioNumber: Int
var date: Date
var thisIsOneSnapshot = [Info]()
var id: Int { scenarioNumber }
static func < (lhs: Scenario, rhs: Scenario) -> Bool {
return lhs.scenarioNumber < rhs.scenarioNumber
}
}
I added Hashable since it uses the Info class as an input.
Then there is the class showing the output overview
class OutputOverview: ObservableObject {
@Published var snapshot = [Scenario]()
// the class includes a formula of how to add the collection of cells (grid) and the additional variables to the snapshot dictionary. Again no initializer was necessary.
Now to go to the ContentView.
struct ContentView: View {
@Environment(\.openURL) var openURL
var scenarioNumberInput: Int = 0
var timeStampAssigned: Date = Date.now
@ObservedObject private var currentGrid: Info = Info()
@ObservedObject private var scenarios: Combinations = Combinations()
var usedForTheCalculations: Double = 0.0
var rows =
[
GridItem(.flexible()),
// whole list of GridItems, I do not know how to calculate these:
// var rows = Array(repeating: GridItem(.flexible()), count: currentGrid.rowTotal)
// gives error "Cannot use instance member 'currentGrid' within property initializer;
// property initializers run before 'self' is available"
]
var body: some View {
GeometryReader { geometry in
VStack {
ScrollView {
LazyHGrid(rows: rows, spacing: 0) {
ForEach(0..<currentGrid.grid.count, id: \.self) { w in
let temp = currentGrid.grid[w].varThatAffectsFontColor
let temp2 = currentGrid.grid[w].varThatAffectsBackground
Text("\(currentGrid.grid[w].randomVarOne, specifier: "%.2f")")
.frame(width: 25, height: 25)
.border(.black)
.font(.system(size: 7))
.foregroundColor(Color(wordName: temp))
.background(Color(wordName: temp2))
}
}
.padding(.top)
}
VStack{
HStack {
Button("Start") {
}
// then some buttons to do the calculations
Button("Add to collection"){
scenarios.addScenario(numbering: scenarioNumberInput, timeStamp:
Date.now, collection: currentGrid.grid)
} // this should add the newly recalculated grid to the dictionary
Button("Go to Results") {
guard let url = URL(string: "myapp://scenario") else { return }
openURL(url)
} // to go to the screen showing the scenarios
Then the second View, the ScenarioView:
struct ScenarioView: View {
@State var selectedScenario = 1
@ObservedObject private var scenarios: OutputOverview
var pickerNumbers = [1, 2, 3, 4, 5]
// this is to be linked to the number of scenarios completed; this code is not done yet.
var rows =
[
GridItem(.flexible()),
GridItem(.flexible()),
// similar list of GridItems here....
]
var body: some View {
Form {
Section {
Picker("Select a scenario", selection: $selectedScenario) {
ForEach(pickerNumbers, id: \.self) {
Text("\($0)")
}
}
}
Section {
ScrollView {
if let idx = scenarios.snapshot.firstIndex(where:
{$0.scenarioNumber == selectedScenario}) {
LazyHGrid(rows: rows, spacing: 0) {
ForEach(0..<scenarios.snapshot[idx].thisIsOneSnapshot.count,
id: \.self) { w in
let temp =
scenarios.snapshot[idx].thisIsOneSnapshot[w].varThatAffectsFontColor
let temp2 =
scenarios.snapshot[idx].thisIsOneSnapshot[w].varThatAffectsBackground
Text("\(scenarios.snapshot[idx].thisIsOneSnapshot[w].randomVarOne, specifier: "%.2f")")
.frame(width: 25, height: 25)
.border(.black)
.font(.system(size: 7))
.foregroundColor(Color(wordName: temp))
.background(Color(wordName: temp2))
}
}
}
}
}
}
}
}
Now while the above does not (for the moment...) give me error messages, I am not able to run the PreviewProvider in the second View. The main problem is in @main:
import SwiftUI
@main
struct ThisIsTheNameOfMyApp: App {
var body: some Scene {
WindowGroup {
ContentView()
}
.handlesExternalEvents(matching: ["main"])
WindowGroup("Scenarios") {
ScenarioView()
// error messages here: 'ScenarioView' initializer is inaccessible due to "private"
// protection level - I don't know what is set to private in ScenarioView that could
// cause this
// second error message: missing argument for parameter 'scenarios' in call
}
.handlesExternalEvents(matching: ["scenario"])
}
}
I am at a loss on how to solve these 2 error messages and would be very grateful for any tips or guidance. Apologies if this question is very long, I scanned many other forum questions and could not find any good answers.
I have tried adding pro forma data in @main as follows:
@main
struct FloodModelScenarioViewerApp: App {
@State var scenarios = Scenario(scenarioNumber: 1, date: Date.now)
var body: some Scene {
WindowGroup {
ContentView()
}
.handlesExternalEvents(matching: ["main"])
WindowGroup("Scenarios") {
ScenarioView(scenarios: scenarios)
}
.handlesExternalEvents(matching: ["scenario"])
}
}
This still gives 2 error messages:
same issue with regards to ScenarioView initialiser being inaccessible due to being 'private'
Cannot convert value of type 'Scenario' to expected argument type 'OutputOverview'
Just remove the private from
@ObservedObject private var scenarios: OutputOverview
The value is coming from the parent, so the parent needs access. So put
@StateObject private var scenarios: OutputOverview = .init()
in FloodModelScenarioViewerApp.
@StateObject is for initializing ObservableObjects and @ObservedObject is for passing them around.
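Putting the two pieces together, a minimal sketch of the app entry point (reusing the names from the question, and assuming ScenarioView declares its scenarios property without private) could look like this:

@main
struct FloodModelScenarioViewerApp: App {
    // The app owns the model; @StateObject creates it once and keeps it alive.
    @StateObject private var scenarios = OutputOverview()
    var body: some Scene {
        WindowGroup {
            ContentView()
        }
        .handlesExternalEvents(matching: ["main"])
        WindowGroup("Scenarios") {
            // Compiles once ScenarioView declares `@ObservedObject var scenarios: OutputOverview`
            // (no `private`), so its memberwise initializer is accessible from here.
            ScenarioView(scenarios: scenarios)
        }
        .handlesExternalEvents(matching: ["scenario"])
    }
}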
I don't know if your code will work after you read this answer, because there are many things to correct, but you can start with these:
In Cell, you shouldn't use an id that is a variable; this may cause inconsistent behavior. Use something like:
let id = UUID()
When you initialize ContentView, you can't use currentGrid inside a variable because currentGrid will not be available before all variables are initialized. Meaning, you are trying to initialize rows before currentGrid actually exists. You can try using the .onAppear modifier:
@State private var rows = [GridItem]()
var body: some View {
GeometryReader { geometry in
// ... view code in here
}
.onAppear {
// Assign to the @State property (not a new local variable) once currentGrid is available
rows = Array(repeating: GridItem(.flexible()), count: currentGrid.rowTotal)
}
}
This creates the view and, when it appears, sets the rows to their proper value.
The message 'ScenarioView' initializer is inaccessible due to "private" protection level seems clear: you must provide a value for the variable scenarios (it doesn't have a default value), but it's marked as private. Remove private:
@ObservedObject var scenarios: OutputOverview
Then, remember to pass a value of type OutputOverview for the variable when you call the view:
ScenarioView(scenarios: aVariableOfTypeOutputOverview)
The type mismatch error you get inside the @main code is also clear - you have defined a variable of type Scenario:
@State var scenarios = Scenario(scenarioNumber: 1, date: Date.now)
but ScenarioView requires another type:
@ObservedObject private var scenarios: OutputOverview
One of them needs to change for your code to work.

Why FirebaseVisionImage.fromMediaImage() produces OutOfMemoryError

CameraX is set up, the analyze() method is called, an image is passed in and then closed (released) with the close() method. From this image a FirebaseVisionImage is created and passed on for processing (text recognition). The available code samples and codelabs differ and either don't implement TextRecognition with CameraX or use old API versions.
override fun analyze(imageProxy: ImageProxy) {
if (isValidText) {
imageProxy.close()
return
}
val mediaImage = imageProxy.image // requires annotation
val degrees = imageProxy.imageInfo.rotationDegrees
val rotation = rotationDegreesToFirebaseRotation(degrees)
if (mediaImage != null) {
runTextRecognition(mediaImage, rotation) // line 44
}
imageProxy.close()
}
private fun runTextRecognition(mediaImage: Image, rotation: Int) {
// Create FirebaseVisionImage from frame
val visionImage = FirebaseVisionImage.fromMediaImage(mediaImage, rotation) // line 64
val recognizer = FirebaseVision.getInstance()
.onDeviceTextRecognizer
recognizer.processImage(visionImage)
.addOnSuccessListener { texts ->
processTextRecognitionResult(texts!!, recognizer)
if (isValidText) {
recognizer.close()
return#addOnSuccessListener
}
}
.addOnFailureListener { e -> // Task failed with an exception
e.printStackTrace()
}
}
In my project I'm using these dependencies:
def firebase_version = '24.0.2'
def camerax_version = '1.0.0-beta02'
implementation "com.google.firebase:firebase-ml-vision:$firebase_version"
implementation "androidx.camera:camera-camera2:$camerax_version"
implementation "androidx.camera:camera-view:1.0.0-alpha09"
implementation "androidx.camera:camera-lifecycle:${camerax_version}"
and this is how I build CameraX
private fun bindPreview(cameraProvider: ProcessCameraProvider) {
// Get screen metrics used to setup camera for full screen resolution
val metrics = DisplayMetrics().also { viewFinder?.display?.getRealMetrics(it) }
val screenAspectRatio = aspectRatio(metrics.widthPixels, metrics.heightPixels)
val rotation = viewFinder?.display?.rotation
// Set up the preview use case to display camera preview
val preview = Preview.Builder()// Request aspect ratio but no resolution
.setTargetAspectRatio(screenAspectRatio)
// Set initial target rotation
.setTargetRotation(rotation!!)
.build()
// Choose the camera by requiring a lens facing
val cameraSelector = CameraSelector.Builder()
.requireLensFacing(CameraSelector.LENS_FACING_BACK)
.build()
val executor = Executors.newSingleThreadExecutor()
// Must unbind the use-cases before rebinding them
cameraProvider.unbindAll()
val imageAnalyzer = ImageAnalysis.Builder()
// Request aspect ratio but no resolution
.setTargetAspectRatio(screenAspectRatio)
// Set initial target rotation, have to call this again if rotation changes
// during the lifecycle of this use case
.setTargetRotation(rotation)
.setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
.build()
imageAnalyzer.setAnalyzer(executor, analyzer)
var camera = cameraProvider.bindToLifecycle(viewFinder?.context as LifecycleOwner, cameraSelector, preview, imageAnalyzer)
// Attach the viewfinder's surface provider to preview use case
preview.setSurfaceProvider(viewFinder?.createSurfaceProvider(camera.cameraInfo))
}
I was able to resolve the issue by switching to ML Kit.
First, update the app/build.gradle file to use ML Kit instead of the Firebase ML Vision dependency:
// Add ML Kit dependencies
implementation 'com.google.android.gms:play-services-mlkit-text-recognition:16.1.0'
Next update the analyzer to use InputImage:
@androidx.camera.core.ExperimentalGetImage
private class TextAnalyzer(private val listener: TextListener) : ImageAnalysis.Analyzer {
override fun analyze(imageProxy: ImageProxy) {
val mediaImage: Image = imageProxy.image ?: return
val image = InputImage.fromMediaImage(mediaImage, imageProxy.imageInfo.rotationDegrees)
runTextRecognition(image)
imageProxy.close()
}
Then update runTextRecognition to:
private fun runTextRecognition(image: InputImage) {
val recognizer = TextRecognition.getClient()
recognizer.process(image)
...
}
That should do it.
Here is the codelab that gives more details.

MulticastSocket does not always receive

I want to implement a "simple" SSDP discovery client. That means the client should send out a
M-SEARCH * HTTP/1.1
HOST: 239.255.255.250:1900
MAN: "ssdp:discover"
MX: 1
ST: ssdp:all
and afterwards listen to "the network"(?) to get the list of IP addresses.
To test the implementation I've written a unit test which creates a "fake" MulticastServer that simply listens on the SSDP IP and port and, when it receives something, sends the same message back.
The problem is that this code works on my machine (macOS) most of the time but never on our CI server (Linux). I sometimes receive the same assertion-failure error on macOS as on the CI, but as I said, only sometimes, not always, and I don't know why.
This is the implementation on the client side:
interface GatewayDiscoverer {
companion object {
val instance: GatewayDiscoverer = DefaultGatewayDiscoverer()
}
suspend fun discoverGateways(timeoutMillis: Int = 1000): List<String>
}
internal class DefaultGatewayDiscoverer : GatewayDiscoverer {
override suspend fun discoverGateways(timeoutMillis: Int): List<String> {
require(timeoutMillis in 1000..5000) {
"timeoutMillis should be between 1000 (inclusive) and 5000 (inclusive)!"
}
val socket = DatagramSocket()
sendSsdpPacket(socket)
val gateways = receiveSsdpPacket(socket, timeoutMillis)
return gateways
}
private fun sendSsdpPacket(socket: DatagramSocket) {
val packetToSend =
"M-SEARCH * HTTP/1.1\r\nHOST: 239.255.255.250:1900\r\nMAN: \"ssdp:discover\"\r\nMX: 1\r\nST: ssdp:all\r\n\r\n"
val packetToSendAsBytes = packetToSend.toByteArray()
val packet = DatagramPacket(
packetToSendAsBytes,
packetToSendAsBytes.size,
InetAddress.getByName("239.255.255.250"),
1900
)
socket.send(packet)
}
private fun receiveSsdpPacket(socket: DatagramSocket, timeoutInMillis: Int): List<String> {
val gatewayList = mutableListOf<String>()
while (true) {
val receivedData = ByteArray(12)
val packetToReceive = DatagramPacket(receivedData, receivedData.size)
socket.soTimeout = timeoutInMillis
try {
socket.receive(packetToReceive)
packetToReceive.address?.hostName?.let { gatewayList.add(it) }
} catch (socketTimeout: SocketTimeoutException) {
return gatewayList
}
}
}
}
And this is the test (it includes the MulticastServer):
class DefaultGatewayDiscovererTest {
@Test
fun `discover gateways should return a list of gateway IPs`() = with(MulticastServer()) {
start()
val list = runBlocking { GatewayDiscoverer.instance.discoverGateways(1000) }
close()
assertThat(list.size).isEqualTo(1)
assertThat(list).contains(InetAddress.getLocalHost().hostAddress)
Unit
}
}
/**
* A "MulticastServer" which will join the
* 239.255.255.250:1900 group to listen on SSDP events.
* They will report back with the same package
* it received.
*/
class MulticastServer : Thread(), Closeable {
private val group = InetAddress.getByName("239.255.255.250")
private val socket: MulticastSocket = MulticastSocket(1900)
init {
// This force to use IPv4...
var netinterface: NetworkInterface? = null
// Otherwise it will (at least on macOS) use IPv6 which leads to issues
// while joining the group...
val networkInterfaces = NetworkInterface.getNetworkInterfaces()
while (networkInterfaces.hasMoreElements()) {
val networkInterface = networkInterfaces.nextElement()
val addressesFromNetworkInterface = networkInterface.inetAddresses
while (addressesFromNetworkInterface.hasMoreElements()) {
val inetAddress = addressesFromNetworkInterface.nextElement()
if (inetAddress.isSiteLocalAddress
&& !inetAddress.isAnyLocalAddress
&& !inetAddress.isLinkLocalAddress
&& !inetAddress.isLoopbackAddress
&& !inetAddress.isMulticastAddress
) {
netinterface = NetworkInterface.getByName(networkInterface.name)
}
}
}
socket.joinGroup(InetSocketAddress("239.255.255.250", 1900), netinterface!!)
}
override fun run() {
while (true) {
val buf = ByteArray(256)
val packet = DatagramPacket(buf, buf.size)
try {
socket.receive(packet)
} catch (socketEx: SocketException) {
break
}
// Print for debugging
val message = String(packet.data, 0, packet.length)
println(message)
socket.send(packet)
}
}
override fun close() = with(socket) {
leaveGroup(group)
close()
}
}
When the test fails, it fails on this line:
assertThat(list.size).isEqualTo(1)
The list is empty.
After some debugging I found out that the MulticastServer doesn't receive the message. Therefore the client doesn't get the response and doesn't add the IP address to the list.
I would expect the MulticastServer to always work without that "flakiness". Am I doing something wrong in the implementation?

ViewController does not conform to protocol MSBClientManagerDelegate

I am trying to use Swift to implement the Microsoft Band SDK. I keep getting this error when trying to set up my code.
class ViewController: UIViewController, UITableViewDelegate, UIImagePickerControllerDelegate, UINavigationControllerDelegate, MSBClientManagerDelegate, UIScrollViewDelegate {
I have never seen this before, but I have also never tried to convert an Objective-C sample to Swift.
Any help would be appreciated!
EDIT: Here is the protocol from Objective-C:
@protocol MSBClientManagerDelegate <NSObject>
- (void)clientManager:(MSBClientManager *)clientManager clientDidConnect:(MSBClient *)client;
- (void)clientManager:(MSBClientManager *)clientManager clientDidDisconnect:(MSBClient *)client;
- (void)clientManager:(MSBClientManager *)clientManager client:(MSBClient *)client didFailToConnectWithError:(NSError *)error;
@end
EDIT 2: After using the suggested Swift helper class
This is how I am trying to set up the connection.
var clients:NSArray = bandHelper.attachedClients()!
var firstClient: MSBClient = clients[0] as MSBClient
if (clients.count == 0){
println("The band is not detected")
return
}
I have no clue how this should be set up
bandHelper.connectClient(firstClient, {completion: (connected:true -> void in)})
println("Please wait...connecting to band")
Then, when trying to send a photo to the band, this function does not work
bandHelper.client?.personalizationManager.updateMeTileImage(bandScaledImage, { (completionHandler: NSError!) -> Void in
NSLog("%#", NSError())})
I am getting thrown off by using the helper class. Any help would be appreciated!
Sample Project
I linked a sample Swift project for Microsoft Band Kit iOS that can send a haptic to the band. Find the link here: http://droolfactory.blogspot.com/2015/03/ios-swift-example-of-connecting-with.html
Microsoft Band Bridging Header
First, to use the Objective-C classes from Swift, create a Bridging Header. Mine looks like this for just the MicrosoftBandKit-iOS framework:
#ifndef ModuleName_Bridging_Header_h
#define ModuleName_Bridging_Header_h
#import <MicrosoftBandKit_iOS/MicrosoftBandKit_iOS.h>
#endif
Make sure to replace ModuleName with the name of your app's module. Find more on Bridging Header files at: https://developer.apple.com/library/ios/documentation/Swift/Conceptual/BuildingCocoaApps/MixandMatch.html
Band Helper Class
Next I wrapped the MSBClientManagerDelegate in a helper class (BandManager) which uses a singleton to manage the Band. I have a gist for it here (https://gist.github.com/mthistle/8f6eb30c68a918fc6240)
The code from the gist is:
import Foundation
let kConnectionChangedNotification = "kConnectionChangedNotification"
let kConnectionFailedNotification = "kConnectionFailedNotification"
private let _SharedBandManagerInstance = BandManager()
class BandManager : NSObject, MSBClientManagerDelegate {
private(set) var client: MSBClient?
private var connectionBlock: ((Bool) -> ())?
private var discoveredClients = [MSBClient]()
private var clientManager = MSBClientManager.sharedManager()
class var sharedInstance: BandManager {
return _SharedBandManagerInstance
}
override init() {
super.init()
self.clientManager.delegate = self
}
func attachedClients() -> [MSBClient]? {
if let manager = self.clientManager {
self.discoveredClients = [MSBClient]()
for client in manager.attachedClients() {
self.discoveredClients.append(client as! MSBClient)
}
}
return self.discoveredClients
}
func disconnectClient(client: MSBClient) {
if (!client.isDeviceConnected) {
return;
}
if let manager = self.clientManager {
manager.cancelClientConnection(client)
self.client = nil
}
}
func connectClient(client: MSBClient, completion: (connected: Bool) -> Void) {
if (client.isDeviceConnected && self.client == client) {
if (self.connectionBlock != nil)
{
self.connectionBlock!(true)
}
return;
}
if let connectedClient = self.client {
self.disconnectClient(client)
}
self.connectionBlock = completion;
self.clientManager.connectClient(client)
}
func clientManager(clientManager: MSBClientManager!, clientDidConnect client: MSBClient!) {
if (self.connectionBlock != nil) {
self.client = client
self.connectionBlock!(true)
self.connectionBlock = nil
}
self.fireClientChangeNotification(client)
}
func clientManager(clientManager: MSBClientManager!, clientDidDisconnect client: MSBClient!) {
self.fireClientChangeNotification(client)
}
func clientManager(clientManager: MSBClientManager!, client: MSBClient!, didFailToConnectWithError error: NSError!) {
if error != nil {
println(error)
}
NSNotificationCenter.defaultCenter().postNotificationName(kConnectionFailedNotification, object: self, userInfo: ["client": client])
}
func fireClientChangeNotification(client: MSBClient) {
NSNotificationCenter.defaultCenter().postNotificationName(kConnectionChangedNotification, object: self, userInfo: ["client": client])
}
}
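
Regarding EDIT 2: with the helper above, a rough usage sketch (hedged; it relies only on the attachedClients() and connectClient(_:completion:) methods of the BandManager class shown here) would be:

let bandManager = BandManager.sharedInstance
let clients = bandManager.attachedClients()
if clients == nil || clients!.isEmpty {
    println("The band is not detected")
} else {
    let firstClient = clients![0]
    println("Please wait... connecting to band")
    bandManager.connectClient(firstClient, completion: { (connected: Bool) -> Void in
        if connected {
            println("Connected to band")
            // From here bandManager.client is set, so band calls such as
            // updating the Me Tile image can be made against it.
        }
    })
}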

Adding observer for KVO without pointers using Swift

In Objective-C, I would normally use something like this:
static NSString *kViewTransformChanged = @"view transform changed";
// or
static const void *kViewTransformChanged = &kViewTransformChanged;
[clearContentView addObserver:self
forKeyPath:@"transform"
options:NSKeyValueObservingOptionNew
context:&kViewTransformChanged];
I have two overloaded methods to choose from to add an observer for KVO with the only difference being the context argument:
clearContentView.addObserver(observer: NSObject?, forKeyPath: String?, options: NSKeyValueObservingOptions, context: CMutableVoidPointer)
clearContentView.addObserver(observer: NSObject?, forKeyPath: String?, options: NSKeyValueObservingOptions, kvoContext: KVOContext)
With Swift not using pointers, I'm not sure how to dereference a pointer to use the first method.
If I create my own KVOContext constant for use with the second method, I wind up with it asking for this:
let test:KVOContext = KVOContext.fromVoidContext(context: CMutableVoidPointer)
EDIT: What is the difference between CMutableVoidPointer and KVOContext? Can someone give me an example of how to use them both and when I would use one over the other?
EDIT #2: A dev at Apple just posted this to the forums: KVOContext is going away; using a global reference as your context is the way to go right now.
There is now a technique officially recommended in the documentation, which is to create a private mutable variable and use its address as the context.
(Updated for Swift 3 on 2017-01-09)
// Set up non-zero-sized storage. We don't intend to mutate this variable,
// but it needs to be `var` so we can pass its address in as UnsafeMutablePointer.
private static var myContext = 0
// NOTE: `static` is not necessary if you want it to be a global variable
observee.addObserver(self, forKeyPath: …, options: [], context: &MyClass.myContext)
override func observeValue(forKeyPath keyPath: String?, of object: Any?, change: [NSKeyValueChangeKey: Any]?, context: UnsafeMutableRawPointer?) {
if context == &myContext {
…
}
else {
super.observeValue(forKeyPath: keyPath, of: object, change: change, context: context)
}
}
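Tying this back to the question's original transform observer, here is a self-contained sketch in the same Swift 3+ style (the class and property names are hypothetical; only the "transform" key path comes from the question):

import UIKit

class TransformWatcher: NSObject {
    // Non-zero-sized storage; its address serves as the unique KVO context.
    private static var transformContext = 0

    let observedView: UIView

    init(view: UIView) {
        observedView = view
        super.init()
        observedView.addObserver(self,
                                 forKeyPath: "transform",
                                 options: [.new],
                                 context: &TransformWatcher.transformContext)
    }

    deinit {
        observedView.removeObserver(self, forKeyPath: "transform", context: &TransformWatcher.transformContext)
    }

    override func observeValue(forKeyPath keyPath: String?, of object: Any?,
                               change: [NSKeyValueChangeKey: Any]?, context: UnsafeMutableRawPointer?) {
        if context == &TransformWatcher.transformContext {
            print("transform changed")
        } else {
            super.observeValue(forKeyPath: keyPath, of: object, change: change, context: context)
        }
    }
}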
Now that KVOContext is gone in Xcode 6 beta 3, you can do the following. Define a global (i.e. not a class property) like so:
let myContext = UnsafePointer<()>()
Add an observer:
observee.addObserver(observer, forKeyPath: …, options: nil, context: myContext)
In the observer:
override func observeValueForKeyPath(keyPath: String!, ofObject object: AnyObject!, change: [NSObject : AnyObject]!, context: UnsafePointer<()>) {
if context == myContext {
…
} else {
super.observeValueForKeyPath(keyPath, ofObject: object, change: change, context: context)
}
}
Swift 4 - observing contentSize change on UITableViewController popover to fix incorrect size
I had been searching for an answer to change to a block based KVO because I was getting a swiftlint warning and it took me piecing quite a few different answers together to get to the right solution. Swiftlint warning:
Block Based KVO Violation: Prefer the new block based KVO API with keypaths when using Swift 3.2 or later. (block_based_kvo).
My use case was to present a popover controller attached to a button in a Nav bar in a view controller and then resize the popover once it's showing - otherwise it would be too big and not fitting the contents of the popover. The popover itself was a UITableViewController that contained static cells, and it was displayed via a Storyboard segue with style popover.
To setup the block based observer, you need the following code inside your popover UITableViewController:
// class level variable to store the statusObserver
private var statusObserver: NSKeyValueObservation?
// Create the observer inside viewWillAppear
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
statusObserver = tableView.observe(\UITableView.contentSize,
changeHandler: { [weak self] (theTableView, _) in
self?.popoverPresentationController?.presentedViewController.preferredContentSize = theTableView.contentSize
})
}
// Don't forget to remove the observer when the popover is dismissed.
override func viewDidDisappear(_ animated: Bool) {
if let observer = statusObserver {
observer.invalidate()
statusObserver = nil
}
super.viewDidDisappear(animated)
}
I didn't need the previous value when the observer was triggered, so left out the options: [.new, .old] when creating the observer.
Update for Swift 4
A context is not required for the block-based observer function, and the existing #keyPath() syntax is replaced with smart key paths to achieve Swift type safety.
class EventOvserverDemo {
var statusObserver:NSKeyValueObservation?
var objectToObserve:UIView?
func registerAddObserver() -> Void {
statusObserver = objectToObserve?.observe(\UIView.tag, options: [.new, .old], changeHandler: {[weak self] (player, change) in
if let tag = change.newValue {
// observed changed value and do the task here on change.
}
})
}
func unregisterObserver() -> Void {
if let sObserver = statusObserver {
sObserver.invalidate()
statusObserver = nil
}
}
}
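A short usage sketch driving the class above (names exactly as defined there) might be:

let demo = EventOvserverDemo()
demo.objectToObserve = UIView()
demo.registerAddObserver()
demo.objectToObserve?.tag = 42 // changing the observed property invokes the change handler
demo.unregisterObserver()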
Complete example using Swift:
//
// AppDelegate.swift
// Photos-MediaFramework-swift
//
// Created by Phurg on 11/11/16.
//
// Displays URLs for all photos in Photos Library
//
// @see http://stackoverflow.com/questions/30144547/programmatic-access-to-the-photos-library-on-mac-os-x-photokit-photos-framewo
//
import Cocoa
import MediaLibrary
// For KVO: https://developer.apple.com/library/content/documentation/Swift/Conceptual/BuildingCocoaApps/AdoptingCocoaDesignPatterns.html#//apple_ref/doc/uid/TP40014216-CH7-ID12
private var mediaLibraryLoaded = 1
private var rootMediaGroupLoaded = 2
private var mediaObjectsLoaded = 3
@NSApplicationMain
class AppDelegate: NSObject, NSApplicationDelegate {
@IBOutlet weak var window: NSWindow!
var mediaLibrary : MLMediaLibrary!
var allPhotosAlbum : MLMediaGroup!
func applicationDidFinishLaunching(_ aNotification: Notification) {
NSLog("applicationDidFinishLaunching:");
let options:[String:Any] = [
MLMediaLoadSourceTypesKey: MLMediaSourceType.image.rawValue, // Can't be Swift enum
MLMediaLoadIncludeSourcesKey: [MLMediaSourcePhotosIdentifier], // Array
]
self.mediaLibrary = MLMediaLibrary(options:options)
NSLog("applicationDidFinishLaunching: mediaLibrary=%#", self.mediaLibrary);
self.mediaLibrary.addObserver(self, forKeyPath:"mediaSources", options:[], context:&mediaLibraryLoaded)
NSLog("applicationDidFinishLaunching: added mediaSources observer");
// Force load
self.mediaLibrary.mediaSources?[MLMediaSourcePhotosIdentifier]
NSLog("applicationDidFinishLaunching: done");
}
override func observeValue(forKeyPath keyPath: String?, of object: Any?, change: [NSKeyValueChangeKey : Any]?, context: UnsafeMutableRawPointer?) {
NSLog("observeValue: keyPath=%#", keyPath!)
let mediaSource:MLMediaSource = self.mediaLibrary.mediaSources![MLMediaSourcePhotosIdentifier]!
if (context == &mediaLibraryLoaded) {
NSLog("observeValue: mediaLibraryLoaded")
mediaSource.addObserver(self, forKeyPath:"rootMediaGroup", options:[], context:&rootMediaGroupLoaded)
// Force load
mediaSource.rootMediaGroup
} else if (context == &rootMediaGroupLoaded) {
NSLog("observeValue: rootMediaGroupLoaded")
let albums:MLMediaGroup = mediaSource.mediaGroup(forIdentifier:"TopLevelAlbums")!
for album in albums.childGroups! {
let albumIdentifier:String = album.attributes["identifier"] as! String
if (albumIdentifier == "allPhotosAlbum") {
self.allPhotosAlbum = album
album.addObserver(self, forKeyPath:"mediaObjects", options:[], context:&mediaObjectsLoaded)
// Force load
album.mediaObjects
}
}
} else if (context == &mediaObjectsLoaded) {
NSLog("observeValue: mediaObjectsLoaded")
let mediaObjects:[MLMediaObject] = self.allPhotosAlbum.mediaObjects!
for mediaObject in mediaObjects {
let url:URL? = mediaObject.url
// URL does not extend NSObject, so can't be passed to NSLog; use string interpolation
NSLog("%#", "\(url)")
}
}
}
}
