mediversal-rn-image-intelligence 1.0.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. package/LICENSE +0 -0
  2. package/README.md +361 -0
  3. package/android/.gradle/8.9/checksums/checksums.lock +0 -0
  4. package/android/.gradle/8.9/dependencies-accessors/gc.properties +0 -0
  5. package/android/.gradle/8.9/executionHistory/executionHistory.lock +0 -0
  6. package/android/.gradle/8.9/fileChanges/last-build.bin +0 -0
  7. package/android/.gradle/8.9/fileHashes/fileHashes.lock +0 -0
  8. package/android/.gradle/8.9/gc.properties +0 -0
  9. package/android/.gradle/buildOutputCleanup/buildOutputCleanup.lock +0 -0
  10. package/android/.gradle/buildOutputCleanup/cache.properties +2 -0
  11. package/android/.gradle/vcs-1/gc.properties +0 -0
  12. package/android/build.gradle +71 -0
  13. package/android/src/main/AndroidManifest.xml +10 -0
  14. package/android/src/main/java/com/mediversalrnimagintelligence/FaceDetectionModule.kt +147 -0
  15. package/android/src/main/java/com/mediversalrnimagintelligence/HandwritingRecognitionModule.kt +74 -0
  16. package/android/src/main/java/com/mediversalrnimagintelligence/ImageIntelligencePackage.kt +20 -0
  17. package/android/src/main/java/com/mediversalrnimagintelligence/TextRecognitionModule.kt +86 -0
  18. package/ios/FaceDetectionModule.m +16 -0
  19. package/ios/FaceDetectionModule.swift +164 -0
  20. package/ios/HandwritingRecognitionModule.m +14 -0
  21. package/ios/HandwritingRecognitionModule.swift +53 -0
  22. package/ios/TextRecognitionModule.m +14 -0
  23. package/ios/TextRecognitionModule.swift +102 -0
  24. package/lib/commonjs/NativeFaceDetectionModule.js +12 -0
  25. package/lib/commonjs/NativeFaceDetectionModule.js.map +1 -0
  26. package/lib/commonjs/NativeHandwritingRecognitionModule.js +12 -0
  27. package/lib/commonjs/NativeHandwritingRecognitionModule.js.map +1 -0
  28. package/lib/commonjs/NativeTextRecognitionModule.js +12 -0
  29. package/lib/commonjs/NativeTextRecognitionModule.js.map +1 -0
  30. package/lib/commonjs/index.js +194 -0
  31. package/lib/commonjs/index.js.map +1 -0
  32. package/lib/commonjs/types.js +2 -0
  33. package/lib/commonjs/types.js.map +1 -0
  34. package/lib/module/NativeFaceDetectionModule.js +8 -0
  35. package/lib/module/NativeFaceDetectionModule.js.map +1 -0
  36. package/lib/module/NativeHandwritingRecognitionModule.js +8 -0
  37. package/lib/module/NativeHandwritingRecognitionModule.js.map +1 -0
  38. package/lib/module/NativeTextRecognitionModule.js +8 -0
  39. package/lib/module/NativeTextRecognitionModule.js.map +1 -0
  40. package/lib/module/index.js +186 -0
  41. package/lib/module/index.js.map +1 -0
  42. package/lib/module/types.js +2 -0
  43. package/lib/module/types.js.map +1 -0
  44. package/lib/typescript/NativeFaceDetectionModule.d.ts +11 -0
  45. package/lib/typescript/NativeFaceDetectionModule.d.ts.map +1 -0
  46. package/lib/typescript/NativeHandwritingRecognitionModule.d.ts +11 -0
  47. package/lib/typescript/NativeHandwritingRecognitionModule.d.ts.map +1 -0
  48. package/lib/typescript/NativeTextRecognitionModule.d.ts +11 -0
  49. package/lib/typescript/NativeTextRecognitionModule.d.ts.map +1 -0
  50. package/lib/typescript/index.d.ts +44 -0
  51. package/lib/typescript/index.d.ts.map +1 -0
  52. package/lib/typescript/types.d.ts +91 -0
  53. package/lib/typescript/types.d.ts.map +1 -0
  54. package/mediversal-rn-image-intelligence.podspec +0 -0
  55. package/package.json +157 -0
  56. package/src/NativeFaceDetectionModule.ts +18 -0
  57. package/src/NativeHandwritingRecognitionModule.ts +16 -0
  58. package/src/NativeTextRecognitionModule.ts +14 -0
  59. package/src/index.tsx +243 -0
  60. package/src/types.ts +96 -0
package/android/src/main/java/com/mediversalrnimagintelligence/ImageIntelligencePackage.kt
@@ -0,0 +1,20 @@
+ package com.mediversalrnimagintelligence
+
+ import com.facebook.react.ReactPackage
+ import com.facebook.react.bridge.NativeModule
+ import com.facebook.react.bridge.ReactApplicationContext
+ import com.facebook.react.uimanager.ViewManager
+
+ class ImageIntelligencePackage : ReactPackage {
+     override fun createNativeModules(reactContext: ReactApplicationContext): List<NativeModule> {
+         return listOf(
+             FaceDetectionModule(reactContext),
+             TextRecognitionModule(reactContext),
+             HandwritingRecognitionModule(reactContext)
+         )
+     }
+
+     override fun createViewManagers(reactContext: ReactApplicationContext): List<ViewManager<*, *>> {
+         return emptyList()
+     }
+ }
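Once this package is added to the host app's package list, React Native exposes each module to JavaScript under the name returned by its `getName()` override. A minimal TypeScript sketch of reaching them over the classic bridge (illustrative, not from the package's own sources):

```ts
import { NativeModules } from 'react-native';

// Keys match each Kotlin/Swift module's getName() / @objc(...) name.
const {
  FaceDetectionModule,
  TextRecognitionModule,
  HandwritingRecognitionModule,
} = NativeModules;

// Every @ReactMethod that takes a Promise surfaces as an async function here.
export { FaceDetectionModule, TextRecognitionModule, HandwritingRecognitionModule };
```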
package/android/src/main/java/com/mediversalrnimagintelligence/TextRecognitionModule.kt
@@ -0,0 +1,86 @@
+ package com.mediversalrnimagintelligence
+
+ import android.net.Uri
+ import com.facebook.react.bridge.Arguments
+ import com.facebook.react.bridge.Promise
+ import com.facebook.react.bridge.ReactApplicationContext
+ import com.facebook.react.bridge.ReactContextBaseJavaModule
+ import com.facebook.react.bridge.ReactMethod
+ import com.facebook.react.bridge.WritableMap
+ import com.google.mlkit.vision.common.InputImage
+ import com.google.mlkit.vision.text.TextRecognition
+ import com.google.mlkit.vision.text.latin.TextRecognizerOptions
+ import java.io.IOException
+
+ class TextRecognitionModule(reactContext: ReactApplicationContext) :
+     ReactContextBaseJavaModule(reactContext) {
+
+     override fun getName(): String {
+         return "TextRecognitionModule"
+     }
+
+     @ReactMethod
+     fun recognizeText(imageUri: String, promise: Promise) {
+         try {
+             // Parse the image from URI
+             val image = parseImageFromUri(imageUri)
+
+             // Get text recognizer instance
+             val recognizer = TextRecognition.getClient(TextRecognizerOptions.DEFAULT_OPTIONS)
+
+             // Process the image
+             recognizer.process(image)
+                 .addOnSuccessListener { visionText ->
+                     val result = Arguments.createMap()
+                     val text = visionText.text
+
+                     if (text.isNotEmpty()) {
+                         result.putString("text", text)
+                     } else {
+                         result.putString("text", "")
+                     }
+
+                     promise.resolve(result)
+                 }
+                 .addOnFailureListener { e ->
+                     val result = Arguments.createMap()
+                     result.putString("text", "")
+                     result.putString("error", e.message ?: "Text recognition failed")
+                     promise.resolve(result)
+                 }
+
+         } catch (e: Exception) {
+             val result = Arguments.createMap()
+             result.putString("text", "")
+             result.putString("error", e.message ?: "Failed to process image")
+             promise.resolve(result)
+         }
+     }
+
+     private fun parseImageFromUri(uriString: String): InputImage {
+         val context = reactApplicationContext
+
+         return try {
+             when {
+                 uriString.startsWith("file://") -> {
+                     val uri = Uri.parse(uriString)
+                     InputImage.fromFilePath(context, uri)
+                 }
+                 uriString.startsWith("content://") -> {
+                     val uri = Uri.parse(uriString)
+                     InputImage.fromFilePath(context, uri)
+                 }
+                 uriString.startsWith("/") -> {
+                     // Absolute file path
+                     val uri = Uri.parse("file://$uriString")
+                     InputImage.fromFilePath(context, uri)
+                 }
+                 else -> {
+                     throw IOException("Unsupported URI format: $uriString")
+                 }
+             }
+         } catch (e: IOException) {
+             throw IOException("Failed to load image from URI: ${e.message}")
+         }
+     }
+ }
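Note that this module resolves the promise on both success and failure, encoding errors in the payload instead of rejecting. A JS caller therefore has to inspect the `error` field itself; a hedged TypeScript sketch of such a wrapper (the interface and function names are illustrative, not the package's published API):

```ts
import { NativeModules } from 'react-native';

// Shape inferred from the WritableMap built in the Kotlin module above.
interface RecognizeTextResult {
  text: string;
  error?: string;
}

export async function recognizeTextOrThrow(imageUri: string): Promise<string> {
  const result: RecognizeTextResult =
    await NativeModules.TextRecognitionModule.recognizeText(imageUri);
  // The native side resolves even on failure, so convert payload errors here.
  if (result.error) {
    throw new Error(result.error);
  }
  return result.text;
}
```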
package/ios/FaceDetectionModule.m
@@ -0,0 +1,16 @@
+ #import <React/RCTBridgeModule.h>
+
+ @interface RCT_EXTERN_MODULE(FaceDetectionModule, NSObject)
+
+ RCT_EXTERN_METHOD(detectFaces:(NSString *)imageUri
+                   mode:(NSString *)mode
+                   minFaceSize:(nonnull NSNumber *)minFaceSize
+                   resolver:(RCTPromiseResolveBlock)resolve
+                   rejecter:(RCTPromiseRejectBlock)reject)
+
+ + (BOOL)requiresMainQueueSetup
+ {
+   return NO;
+ }
+
+ @end
package/ios/FaceDetectionModule.swift
@@ -0,0 +1,164 @@
+ import Foundation
+ import MLKitFaceDetection
+ import MLKitVision
+
+ @objc(FaceDetectionModule)
+ class FaceDetectionModule: NSObject {
+
+   @objc
+   func detectFaces(
+     _ imageUri: String,
+     mode: String,
+     minFaceSize: NSNumber,
+     resolver resolve: @escaping RCTPromiseResolveBlock,
+     rejecter reject: @escaping RCTPromiseRejectBlock
+   ) {
+     DispatchQueue.global(qos: .userInitiated).async {
+       do {
+         // Parse image from URI
+         guard let image = self.parseImageFromUri(imageUri) else {
+           let result: [String: Any] = [
+             "faces": [],
+             "error": "Failed to load image from URI"
+           ]
+           DispatchQueue.main.async {
+             resolve(result)
+           }
+           return
+         }
+
+         let visionImage = VisionImage(image: image)
+         visionImage.orientation = image.imageOrientation
+
+         // Configure face detector
+         let options = FaceDetectorOptions()
+
+         if mode == "accurate" {
+           options.performanceMode = .accurate
+           options.landmarkMode = .all
+           options.classificationMode = .all
+           options.isTrackingEnabled = true
+         } else {
+           options.performanceMode = .fast
+           options.classificationMode = .all
+         }
+
+         options.minFaceSize = CGFloat(truncating: minFaceSize)
+
+         let faceDetector = FaceDetector.faceDetector(options: options)
+
+         // Detect faces
+         faceDetector.process(visionImage) { faces, error in
+           if let error = error {
+             let result: [String: Any] = [
+               "faces": [],
+               "error": error.localizedDescription
+             ]
+             DispatchQueue.main.async {
+               resolve(result)
+             }
+             return
+           }
+
+           guard let faces = faces, !faces.isEmpty else {
+             let result: [String: Any] = ["faces": []]
+             DispatchQueue.main.async {
+               resolve(result)
+             }
+             return
+           }
+
+           // Convert faces to array of dictionaries
+           let facesArray = faces.map { self.convertFaceToDict($0) }
+           let result: [String: Any] = ["faces": facesArray]
+
+           DispatchQueue.main.async {
+             resolve(result)
+           }
+         }
+
+       } catch {
+         let result: [String: Any] = [
+           "faces": [],
+           "error": error.localizedDescription
+         ]
+         DispatchQueue.main.async {
+           resolve(result)
+         }
+       }
+     }
+   }
+
+   private func parseImageFromUri(_ uriString: String) -> UIImage? {
+     var path = uriString
+
+     // Remove file:// prefix if present
+     if path.hasPrefix("file://") {
+       path = String(path.dropFirst(7))
+     }
+
+     // Handle different URI formats
+     if path.hasPrefix("ph://") || path.hasPrefix("assets-library://") {
+       // Photo library assets - would need PHAsset loading
+       // For simplicity, return nil here (would need Photos framework integration)
+       return nil
+     }
+
+     // Load from file path
+     if FileManager.default.fileExists(atPath: path) {
+       return UIImage(contentsOfFile: path)
+     }
+
+     return nil
+   }
+
+   private func convertFaceToDict(_ face: Face) -> [String: Any] {
+     var faceDict: [String: Any] = [:]
+
+     // Bounding box
+     let frame = face.frame
+     let boundingBox: [String: Any] = [
+       "x": Int(frame.origin.x),
+       "y": Int(frame.origin.y),
+       "width": Int(frame.size.width),
+       "height": Int(frame.size.height)
+     ]
+     faceDict["boundingBox"] = boundingBox
+
+     // Smiling probability
+     if face.hasSmilingProbability {
+       faceDict["smilingProbability"] = face.smilingProbability
+     }
+
+     // Left eye open probability
+     if face.hasLeftEyeOpenProbability {
+       faceDict["leftEyeOpenProbability"] = face.leftEyeOpenProbability
+     }
+
+     // Right eye open probability
+     if face.hasRightEyeOpenProbability {
+       faceDict["rightEyeOpenProbability"] = face.rightEyeOpenProbability
+     }
+
+     // Head Euler angles
+     if face.hasHeadEulerAngleY {
+       faceDict["headEulerAngleY"] = face.headEulerAngleY
+     }
+
+     if face.hasHeadEulerAngleZ {
+       faceDict["headEulerAngleZ"] = face.headEulerAngleZ
+     }
+
+     // Tracking ID
+     if face.hasTrackingID {
+       faceDict["trackingId"] = face.trackingID
+     }
+
+     return faceDict
+   }
+
+   @objc
+   static func requiresMainQueueSetup() -> Bool {
+     return false
+   }
+ }
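The dictionary built by `convertFaceToDict` only includes a probability or angle when the corresponding `has…` flag is set, so everything except `boundingBox` should be treated as optional on the JS side. A sketch of a typed caller over the classic bridge (type names and the `minFaceSize` value are illustrative assumptions):

```ts
import { NativeModules } from 'react-native';

// Optionality mirrors the has… guards in convertFaceToDict above.
interface DetectedFace {
  boundingBox: { x: number; y: number; width: number; height: number };
  smilingProbability?: number;
  leftEyeOpenProbability?: number;
  rightEyeOpenProbability?: number;
  headEulerAngleY?: number;
  headEulerAngleZ?: number;
  trackingId?: number;
}

export async function detectFaces(imageUri: string): Promise<DetectedFace[]> {
  // mode is 'accurate' (everything else falls back to fast); minFaceSize is
  // ML Kit's minimum face size as a fraction of the image width.
  const { faces, error } = await NativeModules.FaceDetectionModule.detectFaces(
    imageUri,
    'accurate',
    0.1
  );
  if (error) {
    throw new Error(error);
  }
  return faces as DetectedFace[];
}
```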
package/ios/HandwritingRecognitionModule.m
@@ -0,0 +1,14 @@
+ #import <React/RCTBridgeModule.h>
+
+ @interface RCT_EXTERN_MODULE(HandwritingRecognitionModule, NSObject)
+
+ RCT_EXTERN_METHOD(recognizeHandwriting:(NSString *)imageUri
+                   resolver:(RCTPromiseResolveBlock)resolve
+                   rejecter:(RCTPromiseRejectBlock)reject)
+
+ + (BOOL)requiresMainQueueSetup
+ {
+   return NO;
+ }
+
+ @end
package/ios/HandwritingRecognitionModule.swift
@@ -0,0 +1,53 @@
+ import Foundation
+ import MLKitDigitalInkRecognition
+ import MLKitVision
+
+ @objc(HandwritingRecognitionModule)
+ class HandwritingRecognitionModule: NSObject {
+
+   @objc
+   func recognizeHandwriting(
+     _ imageUri: String,
+     resolver resolve: @escaping RCTPromiseResolveBlock,
+     rejecter reject: @escaping RCTPromiseRejectBlock
+   ) {
+     DispatchQueue.main.async {
+       // Note: Digital Ink Recognition in ML Kit is designed for real-time stroke data,
+       // not static images. For production use with static images, you would need to:
+       // 1. Use a different API (like Cloud Vision API's handwriting detection)
+       // 2. Or pre-process the image to extract strokes
+       //
+       // For this implementation, we'll provide a graceful fallback indicating
+       // that handwriting recognition from static images is not fully supported
+       // by the on-device Digital Ink API.
+
+       let result: [String: Any] = [
+         "text": "",
+         "error": "Digital Ink Recognition requires stroke data, not static images. " +
+           "For handwriting in static images, consider using Cloud Vision API or " +
+           "preprocessing to extract stroke information."
+       ]
+       resolve(result)
+
+       // Alternative implementation note:
+       // If you have stroke data (from a drawing canvas), the proper implementation would be:
+       // 1. Create an Ink object from strokes
+       // 2. Get recognizer with appropriate model identifier
+       // 3. Call recognizer.recognize(ink)
+       //
+       // Example (commented out as it requires stroke data):
+       // let identifier = DigitalInkRecognitionModelIdentifier(forLanguageTag: "en-US")
+       // let model = DigitalInkRecognitionModel.modelIdentifier(identifier!)
+       // let options = DigitalInkRecognizerOptions(model: model)
+       // let recognizer = DigitalInkRecognizer.digitalInkRecognizer(options: options)
+       // recognizer.recognize(ink: ink) { result, error in
+       //   // Process recognition candidates
+       // }
+     }
+   }
+
+   @objc
+   static func requiresMainQueueSetup() -> Bool {
+     return false
+   }
+ }
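Because this iOS implementation is a stub that always resolves with an explanatory `error`, JS callers should treat a populated `error` as "not supported" rather than a transient failure. An illustrative TypeScript guard (the helper name is an assumption, not the package's API):

```ts
import { NativeModules } from 'react-native';

// Returns null when handwriting recognition is unavailable
// (which, per the stub above, is always the case on iOS today).
export async function tryRecognizeHandwriting(imageUri: string): Promise<string | null> {
  const { text, error } =
    await NativeModules.HandwritingRecognitionModule.recognizeHandwriting(imageUri);
  return error ? null : text;
}
```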
package/ios/TextRecognitionModule.m
@@ -0,0 +1,14 @@
+ #import <React/RCTBridgeModule.h>
+
+ @interface RCT_EXTERN_MODULE(TextRecognitionModule, NSObject)
+
+ RCT_EXTERN_METHOD(recognizeText:(NSString *)imageUri
+                   resolver:(RCTPromiseResolveBlock)resolve
+                   rejecter:(RCTPromiseRejectBlock)reject)
+
+ + (BOOL)requiresMainQueueSetup
+ {
+   return NO;
+ }
+
+ @end
package/ios/TextRecognitionModule.swift
@@ -0,0 +1,102 @@
+ import Foundation
+ import MLKitTextRecognition
+ import MLKitVision
+
+ @objc(TextRecognitionModule)
+ class TextRecognitionModule: NSObject {
+
+   @objc
+   func recognizeText(
+     _ imageUri: String,
+     resolver resolve: @escaping RCTPromiseResolveBlock,
+     rejecter reject: @escaping RCTPromiseRejectBlock
+   ) {
+     DispatchQueue.global(qos: .userInitiated).async {
+       do {
+         // Parse image from URI
+         guard let image = self.parseImageFromUri(imageUri) else {
+           let result: [String: Any] = [
+             "text": "",
+             "error": "Failed to load image from URI"
+           ]
+           DispatchQueue.main.async {
+             resolve(result)
+           }
+           return
+         }
+
+         let visionImage = VisionImage(image: image)
+         visionImage.orientation = image.imageOrientation
+
+         // Create text recognizer
+         let options = TextRecognizerOptions()
+         let textRecognizer = TextRecognizer.textRecognizer(options: options)
+
+         // Recognize text
+         textRecognizer.process(visionImage) { result, error in
+           if let error = error {
+             let errorResult: [String: Any] = [
+               "text": "",
+               "error": error.localizedDescription
+             ]
+             DispatchQueue.main.async {
+               resolve(errorResult)
+             }
+             return
+           }
+
+           guard let result = result else {
+             let emptyResult: [String: Any] = ["text": ""]
+             DispatchQueue.main.async {
+               resolve(emptyResult)
+             }
+             return
+           }
+
+           let recognizedText = result.text
+           let successResult: [String: Any] = ["text": recognizedText]
+
+           DispatchQueue.main.async {
+             resolve(successResult)
+           }
+         }
+
+       } catch {
+         let result: [String: Any] = [
+           "text": "",
+           "error": error.localizedDescription
+         ]
+         DispatchQueue.main.async {
+           resolve(result)
+         }
+       }
+     }
+   }
+
+   private func parseImageFromUri(_ uriString: String) -> UIImage? {
+     var path = uriString
+
+     // Remove file:// prefix if present
+     if path.hasPrefix("file://") {
+       path = String(path.dropFirst(7))
+     }
+
+     // Handle different URI formats
+     if path.hasPrefix("ph://") || path.hasPrefix("assets-library://") {
+       // Photo library assets - would need PHAsset loading
+       return nil
+     }
+
+     // Load from file path
+     if FileManager.default.fileExists(atPath: path) {
+       return UIImage(contentsOfFile: path)
+     }
+
+     return nil
+   }
+
+   @objc
+   static func requiresMainQueueSetup() -> Bool {
+     return false
+   }
+ }
package/lib/commonjs/NativeFaceDetectionModule.js
@@ -0,0 +1,12 @@
+ "use strict";
+
+ Object.defineProperty(exports, "__esModule", {
+   value: true
+ });
+ exports.default = void 0;
+ var _reactNative = require("react-native");
+ /**
+  * Face Detection TurboModule Specification
+  */
+ var _default = exports.default = _reactNative.TurboModuleRegistry.getEnforcing('FaceDetectionModule');
+ //# sourceMappingURL=NativeFaceDetectionModule.js.map
package/lib/commonjs/NativeFaceDetectionModule.js.map
@@ -0,0 +1 @@
+ {"version":3,"names":["_reactNative","require","_default","exports","default","TurboModuleRegistry","getEnforcing"],"sourceRoot":"../../src","sources":["NativeFaceDetectionModule.ts"],"mappings":";;;;;;AACA,IAAAA,YAAA,GAAAC,OAAA;AAGA;AACA;AACA;AAFA,IAAAC,QAAA,GAAAC,OAAA,CAAAC,OAAA,GAWeC,gCAAmB,CAACC,YAAY,CAC7C,qBACF,CAAC","ignoreList":[]}
package/lib/commonjs/NativeHandwritingRecognitionModule.js
@@ -0,0 +1,12 @@
+ "use strict";
+
+ Object.defineProperty(exports, "__esModule", {
+   value: true
+ });
+ exports.default = void 0;
+ var _reactNative = require("react-native");
+ /**
+  * Handwriting Recognition TurboModule Specification
+  */
+ var _default = exports.default = _reactNative.TurboModuleRegistry.getEnforcing('HandwritingRecognitionModule');
+ //# sourceMappingURL=NativeHandwritingRecognitionModule.js.map
package/lib/commonjs/NativeHandwritingRecognitionModule.js.map
@@ -0,0 +1 @@
+ {"version":3,"names":["_reactNative","require","_default","exports","default","TurboModuleRegistry","getEnforcing"],"sourceRoot":"../../src","sources":["NativeHandwritingRecognitionModule.ts"],"mappings":";;;;;;AACA,IAAAA,YAAA,GAAAC,OAAA;AAGA;AACA;AACA;AAFA,IAAAC,QAAA,GAAAC,OAAA,CAAAC,OAAA,GASeC,gCAAmB,CAACC,YAAY,CAC7C,8BACF,CAAC","ignoreList":[]}
package/lib/commonjs/NativeTextRecognitionModule.js
@@ -0,0 +1,12 @@
+ "use strict";
+
+ Object.defineProperty(exports, "__esModule", {
+   value: true
+ });
+ exports.default = void 0;
+ var _reactNative = require("react-native");
+ /**
+  * Text Recognition TurboModule Specification
+  */
+ var _default = exports.default = _reactNative.TurboModuleRegistry.getEnforcing('TextRecognitionModule');
+ //# sourceMappingURL=NativeTextRecognitionModule.js.map
package/lib/commonjs/NativeTextRecognitionModule.js.map
@@ -0,0 +1 @@
+ {"version":3,"names":["_reactNative","require","_default","exports","default","TurboModuleRegistry","getEnforcing"],"sourceRoot":"../../src","sources":["NativeTextRecognitionModule.ts"],"mappings":";;;;;;AACA,IAAAA,YAAA,GAAAC,OAAA;AAGA;AACA;AACA;AAFA,IAAAC,QAAA,GAAAC,OAAA,CAAAC,OAAA,GAOeC,gCAAmB,CAACC,YAAY,CAC7C,uBACF,CAAC","ignoreList":[]}