crate version maybe out of date compared to example #70
-
I am coding from scratch instead of cloning the entire repo, so this is my code so far: use std::{collections::VecDeque, path::PathBuf};
use image;
use ocrs::{OcrEngine, OcrEngineParams};
use rten;
const SAMPLE_CONTENTS_DIR: &str = "testing_images";
const MODELS_DIR: &str = "models";
mod ocrs_extras;
/// Read a file from a path that is relative to the crate root.
//fn read_file(path: &str) -> Result<Vec<u8>, std::io::Error>
fn read_file(path: &str) -> Vec<u8>
{
//let mut abs_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
let mut abs_path = PathBuf::new();
abs_path.push(path);
return std::fs::read(abs_path).unwrap();
}
struct Args
{
image: String,
}
//fn parse_args() -> Result<Args, lexopt::Error>
fn parse_args() -> Result<Args, lexopt::Error>
{
use lexopt::prelude::*;
let mut values = VecDeque::new();
let mut parser = lexopt::Parser::from_env();
while let Some(arg) = parser.next()?
{
match arg
{
Value(val) => values.push_back(val.string()?),
Long("help") =>
{
println!(
"Usage: {bin_name} <image>",
bin_name = parser.bin_name().unwrap_or("hello_ocrs")
);
std::process::exit(0);
}
_ => return Err(arg.unexpected()),
}
}
let image = values.pop_front().ok_or("missing `image` arg")?;
Ok(Args{image})
}
fn main()
{
//let args = parse_args().unwrap();
//let img = Image::from_path(&format!("{}/large_sign.jpg", SAMPLE_CONTENTS_DIR)).unwrap();
//let img = Image::from_path(&format!("{}/mobile_speed.jpeg", SAMPLE_CONTENTS_DIR)).unwrap();
//let img = Image::from_path(&format!("{}/small_sign.jpeg", SAMPLE_CONTENTS_DIR)).unwrap();
let detection_model_data = read_file(&format!("{}/text-detection.rten", MODELS_DIR));
let rec_model_data = read_file(&format!("{}/text-recognition.rten", MODELS_DIR));
let detection_model = rten::Model::load(&detection_model_data).unwrap();
let recognition_model = rten::Model::load(&rec_model_data).unwrap();
let engine = OcrEngine::new(OcrEngineParams
{
detection_model: Some(detection_model),
recognition_model: Some(recognition_model),
..Default::default()
}).unwrap();
let img = image::open(&format!("{}/image.jpeg", SAMPLE_CONTENTS_DIR)).map(|image| image.into_rgb8()).unwrap();
//ocrs_extras::preprocess::test();
let img_source = ocrs_extras::preprocess::ImageSource::from_bytes(img.as_raw(), img.dimensions()).unwrap();
let ocr_input = engine.prepare_input(img_source).unwrap(); // Error
} However on this line:
error[E0308]: mismatched types
--> src/main.rs:77:42
|
77 | let ocr_input = engine.prepare_input(img_source).unwrap();
| ------------- ^^^^^^^^^^ expected `TensorBase<f32, &[f32], NdLayout<3>>`, found `ImageSource<'_>`
| |
| arguments to this method are incorrect
|
= note: expected struct `TensorBase<f32, &[f32], NdLayout<3>>`
found struct `ImageSource<'_>` I pretty much copied most of the code. Inside Line 87 in ea67c9e This is the code: /// Preprocess an image for use with other methods of the engine.
pub fn prepare_input(&self, image: ImageSource) -> anyhow::Result<OcrInput> {
Ok(OcrInput {
image: prepare_image(image),
})
} which is only taking please note inside |
Beta Was this translation helpful? Give feedback.
Replies: 1 comment 3 replies
-
There have indeed been some API changes on the |
Beta Was this translation helpful? Give feedback.
I would suggest to use the code from the older version of the example I linked above to get started. That will work with the published version of the
ocrs
crate. I will publish a new version of the crate soon, hopefully some time in the next week.