I am using the firebase_ml_vision package for text recognition. It works on the Android side, but when I test on a real iOS device, a photo taken with the image_picker plugin doesn't get its text recognized. I also tried converting the image to Uint8 bytes, and it still isn't read.
Do I need to convert the image somehow?
Here is my image picker function:
Future<Null> getImage(ImageSource source) async {
  try {
    var image = await ImagePicker.pickImage(source: source);
    var uuid = new Uuid();
    // Step 3: Get directory where we can duplicate selected file.
    Directory directory = await getApplicationDocumentsDirectory();
    String path = directory.path;
    String pathVar = path + '/' + uuid.v1() + '.png';
    await image.copy(pathVar);
    if (image != null) {
      _getImageSize(image);
      _imageFromGallery = image;
      _isLoaded = true;
      notifyListeners();
    } else {
      Flushbar(
        message: "Lütfen Resim Seçiniz", // "Please select an image"
      );
    }
  } catch (e) {
    _isLoaded = false;
    print(e.toString());
    notifyListeners();
  }
}
Here is my ML Vision text recognition function:
Future readText(BuildContext context) async {
  FirebaseVisionImage ourImage = FirebaseVisionImage.fromFile(
      Provider.of<CameraProvider>(context).imageFromSource);
  TextRecognizer recognizeText = FirebaseVision.instance.textRecognizer();
  VisionText readText = await recognizeText.processImage(ourImage);
  List<TextLine> lines = List();
  List<TextElement> words = List();
  List<TextBlock> box = List();
  try {
    for (TextBlock block in readText.blocks) {
      if (block.text != null) {
        box.add(block);
      }
      _textBox = box;
      notifyListeners();
      for (TextLine line in block.lines) {
        if (line.text != null) {
          lines.add(line);
        }
        _textLines = lines;
        notifyListeners();
        for (TextElement word in line.elements) {
          if (word.text != null) {
            words.add(word);
          }
          _textWords = words;
          notifyListeners();
        }
      }
    }
  } catch (e) {
    print(e);
  }
}
Did you include the line pod 'Firebase/MLVisionTextModel' in the ios/Podfile and run pod update in the ios/ directory? The results would return empty if you didn't.
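For reference, a minimal sketch of what that part of the ios/Podfile could look like, assuming the default Flutter 'Runner' target and on-device text recognition (the target name and other pods are placeholders for your own project):

# ios/Podfile (sketch)
target 'Runner' do
  use_frameworks!

  # ML Kit text model required for on-device text recognition with
  # firebase_ml_vision on iOS; without it processImage returns empty results.
  pod 'Firebase/MLVisionTextModel'
end

After editing the Podfile, run pod update (or pod install) from inside the ios/ directory and do a full rebuild of the app.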