How to get Rust sqlx sqlite query to work?

main.rs:
#[async_std::main]
async fn main() -> Result<(), sqlx::Error> {
    use sqlx::Connect;
    let mut conn = sqlx::SqliteConnection::connect("sqlite:///home/ace/hello_world/test.db").await?;
    let row = sqlx::query!("SELECT * FROM tbl").fetch_all(&conn).await?;
    println!("{}{}", row.0, row.1);
    Ok(())
}
Cargo.toml:
[package]
name = "hello_world"
version = "0.1.0"
authors = ["ace"]
edition = "2018"
[dependencies]
async-std = {version = "1", features = ["attributes"]}
sqlx = { version="0.3.5", default-features=false, features=["runtime-async-std","macros","sqlite"] }
bash session:
ace#SLAB:~/hello_world$ sqlite test.db
SQLite version 2.8.17
Enter ".help" for instructions
sqlite> create table tbl ( num integer, chr varchar );
sqlite> insert into tbl values (1,'ok');
sqlite> .quit
ace#SLAB:~/hello_world$ pwd
/home/ace/hello_world
ace#SLAB:~/hello_world$ export DATABASE_URL=sqlite:///home/ace/hello_world/test.db
ace#SLAB:~/hello_world$ cargo run
Compiling hello_world v0.1.0 (/home/ace/hello_world)
error: failed to connect to database: file is not a database
--> src/main.rs:8:12
|
8 | let row = sqlx::query!("SELECT * FROM tbl").fetch_all(&conn).await?;
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
= note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info)
error: aborting due to previous error
error: could not compile `hello_world`.
To learn more, run the command again with --verbose.
ace#SLAB:~/hello_world$ rustc --version
rustc 1.44.0 (49cae5576 2020-06-01)
ace#SLAB:~/hello_world$ uname -r
5.4.0-33-generic
ace#SLAB:~/hello_world$ cat /etc/os-release | head -2
NAME="Ubuntu"
VERSION="20.04 LTS (Focal Fossa)"
ace#SLAB:~/hello_world$
Also tried using DATABASE_URL "sqlite::memory:" (both in environment variable and in main.rs) with system table "sqlite_master". Got different error:
error[E0277]: the trait bound `&sqlx_core::sqlite::connection::SqliteConnection: sqlx_core::executor::RefExecutor<'_>` is not satisfied
... but it must have gotten partway to success because when I used table name "Xsqlite_master" with memory db, it complained that there was no such table.
Tried "sqlite://home"(etc) and every other number of slashes, zero through 4. Tried several hundred other things. :(
Thank you!

There may be several more things to try:
Try sqlite3 /home/ace/hello_world/test.db to double-check that the DB exists, and make sure the tbl table is defined there with .schema tbl. Note that your bash session used the old SQLite 2 shell (it prints "SQLite version 2.8.17"); a file created with it is not readable by SQLite 3 clients such as sqlx, which report exactly "file is not a database", so re-create the database with sqlite3.
Try the DB path with a single slash, i.e. sqlite:/home/ace/hello_world/test.db
Lastly, try using the query function instead of the macro (https://docs.rs/sqlx/0.3.5/sqlx/fn.query.html) to see if it works.
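A minimal sketch of that non-macro form (assuming sqlx 0.3's API, where the connection is passed as &mut and column values are read through the Row trait; the column types below are illustrative):

use sqlx::{Connect, Row};

#[async_std::main]
async fn main() -> Result<(), sqlx::Error> {
    let mut conn =
        sqlx::SqliteConnection::connect("sqlite:/home/ace/hello_world/test.db").await?;
    // The plain `query` function does no compile-time checking, so it does not
    // need DATABASE_URL (or a reachable database) at build time the way query! does.
    let rows = sqlx::query("SELECT num, chr FROM tbl")
        .fetch_all(&mut conn)
        .await?;
    for row in rows {
        let num: i64 = row.get("num");   // positional indices (row.get(0)) also work
        let chr: String = row.get("chr");
        println!("{} {}", num, chr);
    }
    Ok(())
}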

Related

path not being detected by Nextflow

I'm new to nf-core/Nextflow and, needless to say, the documentation does not always reflect what is actually implemented. I'm defining the basic pipeline below:
nextflow.enable.dsl=2

process RUNBLAST {
    input:
    val thr
    path query
    path db
    path output

    output:
    path output

    script:
    """
    blastn -query ${query} -db ${db} -out ${output} -num_threads ${thr}
    """
}

workflow {
    //println "I want to BLAST $params.query to $params.dbDir/$params.dbName using $params.threads CPUs and output it to $params.outdir"
    RUNBLAST(params.threads, params.query, params.dbDir, params.output)
}
Then i'm executing the pipeline with
nextflow run main.nf --query test2.fa --dbDir blast/blastDB
Then i get the following error:
N E X T F L O W ~ version 22.10.6
Launching `main.nf` [dreamy_hugle] DSL2 - revision: c388cf8f31
Error executing process > 'RUNBLAST'
Error executing process > 'RUNBLAST'
Caused by:
Not a valid path value: 'test2.fa'
Tip: you can replicate the issue by changing to the process work dir and entering the command bash .command.run
I know test2.fa exists in the current directory:
(nfcore) MN:nf-core-basicblast jraygozagaray$ ls
CHANGELOG.md conf other.nf
CITATIONS.md docs pyproject.toml
CODE_OF_CONDUCT.md lib subworkflows
LICENSE main.nf test.fa
README.md modules test2.fa
assets modules.json work
bin nextflow.config workflows
blast nextflow_schema.json
I also tried with "file" instead of path, but that is deprecated and raises other kinds of errors.
It would be helpful to know how to fix this so I can get started with the pipeline-building process.
Shouldn't Nextflow copy the file to the execution path?
Thanks
You get the above error because params.query is not actually a path value. It's probably just a simple String or GString. The solution is to instead supply a file object, for example:
workflow {
    query = file(params.query)
    BLAST( query, ... )
}
Note that a value channel is implicitly created by a process when it is invoked with a simple value, like the above file object. If you need to be able to BLAST multiple query files, you'll instead need a queue channel, which can be created using the fromPath factory method, for example:
params.query = "${baseDir}/data/*.fa"
params.db = "${baseDir}/blastdb/nt"
params.outdir = './results'

db_name = file(params.db).name
db_path = file(params.db).parent

process BLAST {
    publishDir(
        path: "${params.outdir}/blast",
        mode: 'copy',
    )

    input:
    tuple val(query_id), path(query)
    path db

    output:
    tuple val(query_id), path("${query_id}.out")

    """
    blastn \\
        -num_threads ${task.cpus} \\
        -query "${query}" \\
        -db "${db}/${db_name}" \\
        -out "${query_id}.out"
    """
}

workflow {
    Channel
        .fromPath( params.query )
        .map { file -> tuple(file.baseName, file) }
        .set { query_ch }

    BLAST( query_ch, db_path )
}
Note that the usual way to specify the number of threads/CPUs is the cpus directive, which can be configured using a process selector in your nextflow.config. For example:
process {
    withName: BLAST {
        cpus = 4
    }
}

bus error on usage of rusqlite with spatialite extension

I'm seeing a bus error on cargo run when attempting to load the spatialite extension with rusqlite:
Finished dev [unoptimized + debuginfo] target(s) in 1.19s
Running `target/debug/rust-spatialite-example`
[1] 33253 bus error cargo run --verbose
My suspicion is that there's a mismatch of sqlite version and spatialite and that they need to be built together rather than using the bundled feature of rusqlite, though it seems like that'd result in a different error?
Here's how things are set up:
Cargo.toml
[package]
name = "rust-spatialite-example"
version = "0.0.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
rusqlite = { version = "0.28.0", features = ["load_extension", "bundled"] }
init.sql
CREATE TABLE place (
    id INTEGER PRIMARY KEY,
    name TEXT NOT NULL
);

SELECT AddGeometryColumn('place', 'geom', 4326, 'POINT', 'XY', 0);
SELECT CreateSpatialIndex('place', 'geom');
main.rs
use rusqlite::{Connection, Result, LoadExtensionGuard};

#[derive(Debug)]
struct Place {
    id: i32,
    name: String,
    geom: String,
}

fn load_spatialite(conn: &Connection) -> Result<()> {
    unsafe {
        let _guard = LoadExtensionGuard::new(conn)?;
        conn.load_extension("/opt/homebrew/Cellar/libspatialite/5.0.1_2/lib/mod_spatialite", None)
    }
}

fn main() -> Result<()> {
    let conn = Connection::open("./geo.db")?;
    load_spatialite(&conn)?;
    // ... sql statements that aren't executed
    Ok(())
}
Running:
cat init.sql | spatialite geo.db
cargo run
The mod_spatialite path is correct (there's an expected SqliteFailure error when that path is wrong). I tried explicitly setting sqlite3_modspatialite_init as the entry point and the behavior stayed the same.
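For reference, the entry-point variant only changes the second argument to load_extension (a sketch, assuming rusqlite's Option<&str> entry-point parameter):

fn load_spatialite(conn: &Connection) -> Result<()> {
    unsafe {
        let _guard = LoadExtensionGuard::new(conn)?;
        // Name the init function explicitly instead of letting SQLite derive it
        // from the library file name.
        conn.load_extension(
            "/opt/homebrew/Cellar/libspatialite/5.0.1_2/lib/mod_spatialite",
            Some("sqlite3_modspatialite_init"),
        )
    }
}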

How to re-create an equivalent to a Linux bash statement in Deno with Deno.run

How do I re-create an equivalent to the following Linux bash statement in Deno?
docker compose exec container_name -uroot -ppass db_name < ./dbDump.sql
I have tried the following:
const encoder = new TextEncoder()

const p = await Deno.run({
  cmd: [
    'docker',
    'compose',
    'exec',
    'container_name',
    'mysql',
    '-uroot',
    '-ppass',
    'db_name',
  ],
  stdout: 'piped',
  stderr: 'piped',
  stdin: 'piped',
})

await p.stdin.write(encoder.encode(await Deno.readTextFile('./dbDump.sql')))
await p.stdin.close()
await p.close()
But for some reason, whenever I do it this way I get the error ERROR 1064 (42000) at line 145: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version, which does not happen when I run the exact same command in bash.
Could someone please explain to me how this has to be done properly?
Without a sample input file, it's impossible to be certain of your exact issue.
Given the context though, I suspect that your input file is too large for a single proc.stdin.write() call. Try using the writeAll() function to make sure the full payload goes through:
import { writeAll } from "https://deno.land/std@0.119.0/streams/conversion.ts";
await writeAll(proc.stdin, await Deno.readFile(sqlFilePath));
To show what this fixes, here's a Deno program pipe-to-wc.ts which passes its input to the Linux 'word count' utility (in character-counting mode):
#!/usr/bin/env -S deno run --allow-read=/dev/stdin --allow-run=wc

const proc = await Deno.run({
  cmd: ['wc', '-c'],
  stdin: 'piped',
});

await proc.stdin.write(await Deno.readFile('/dev/stdin'));
proc.stdin.close();
await proc.status();
If we use this program with a small input, the count lines up:
# use the shebang to make the following commands easier
$ chmod +x pipe-to-wc.ts
$ dd if=/dev/zero bs=1024 count=1 | ./pipe-to-wc.ts
1+0 records in
1+0 records out
1024 bytes (1.0 kB, 1.0 KiB) copied, 0.000116906 s, 8.8 MB/s
1024
But as soon as the input is big, only 65k bytes are going through!
$ dd if=/dev/zero bs=1024 count=100 | ./pipe-to-wc.ts
100+0 records in
100+0 records out
102400 bytes (102 kB, 100 KiB) copied, 0.0424347 s, 2.4 MB/s
65536
To fix this issue, let's replace the write() call with writeAll():
#!/usr/bin/env -S deno run --allow-read=/dev/stdin --allow-run=wc
import { writeAll } from "https://deno.land/std@0.119.0/streams/conversion.ts";

const proc = await Deno.run({
  cmd: ['wc', '-c'],
  stdin: 'piped',
});

await writeAll(proc.stdin, await Deno.readFile('/dev/stdin'));
proc.stdin.close();
await proc.status();
Now all the bytes are getting passed through on big inputs :D
$ dd if=/dev/zero bs=1024 count=1000 | ./pipe-to-wc.ts
1000+0 records in
1000+0 records out
1024000 bytes (1.0 MB, 1000 KiB) copied, 0.0854184 s, 12.0 MB/s
1024000
Note that this will still fail on huge inputs once they exceed the amount of memory available to your program. The writeAll() solution should be fine up to 100 megabytes or so. After that point you'd probably want to switch to a streaming solution.
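A rough sketch of such a streaming variant (assuming a copy helper is exported from the same std streams/conversion module as writeAll; it forwards the input in chunks instead of buffering the whole file in memory):

#!/usr/bin/env -S deno run --allow-read=/dev/stdin --allow-run=wc
import { copy } from "https://deno.land/std@0.119.0/streams/conversion.ts";

const proc = await Deno.run({
  cmd: ['wc', '-c'],
  stdin: 'piped',
});

// Stream /dev/stdin into the subprocess chunk by chunk rather than reading it
// all into memory first.
const file = await Deno.open('/dev/stdin');
await copy(file, proc.stdin);
file.close();
proc.stdin.close();
await proc.status();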
First, a couple of notes:
Deno currently doesn't offer a way to create a detached subprocess. (You didn't mention this, but it seems potentially relevant to your scenario given typical docker compose usage) See denoland/deno#5501.
Deno's subprocess API is currently being reworked. See denoland/deno#11016.
Second, here are links to the relevant docs:
docker-compose exec
CLI APIs > Deno.run
Manual > Creating a subprocess (Deno v1.17.0)
Now, here's a commented breakdown of how to create a subprocess (according to the current API) using your scenario as an example:
module.ts:
const dbUser = 'actual_database_username';
const dbPass = 'actual_database_password';
const dbName = 'actual_database_name';

// mysql requires the password to be attached directly to -p (no space);
// with a separate argument, mysql would treat it as the database name
const dockerExecProcCmd = ['mysql', '-u', dbUser, `-p${dbPass}`, dbName];
const serviceName = 'actual_compose_service_name';

// Build the run command
const cmd = ['docker', 'compose', 'exec', '-T', serviceName, ...dockerExecProcCmd];

/**
 * Create the subprocess
 *
 * For now, leave `stderr` and `stdout` undefined so they'll print
 * to your console while you are debugging. Later, you can pipe (capture) them
 * and handle them in your program
 */
const p = Deno.run({
  cmd,
  stdin: 'piped',
  // stderr: 'piped',
  // stdout: 'piped',
});

/**
 * If you use a relative path, this will be relative to `Deno.cwd`
 * at the time the subprocess is created
 *
 * https://doc.deno.land/deno/stable/~/Deno.cwd
 */
const sqlFilePath = './dbDump.sql';

// Write contents of SQL script to stdin
await p.stdin.write(await Deno.readFile(sqlFilePath));

/**
 * Close stdin
 *
 * I don't know how `mysql` handles `stdin`, but if it needs the EOT sent by
 * closing and you don't need to write to `stdin` any more, then this is correct
 */
p.stdin.close();

// Wait for the process to finish (either OK or NOK)
const { code } = await p.status();
console.log({ 'docker-compose exit status code': code });

// Not strictly necessary, but better to be explicit
p.close();

nix-shell script does nothing when using script

I'm quite new to Nix and I'm trying to create a very simple shell.nix script file.
Unfortunately I need an old package: mariadb-10.4.21. After reading and searching a bit I found out that version 10.4.17 (would've been nice to have the exact version but I couldn't find it) is in channel nixos-20.09, but when I do
$ nix-shell --version
nix-shell (Nix) 2.5.1
$ cat shell.nix
let
  pkgs = import <nixpkgs> {};

  # git ls-remote https://github.com/nixos/nixpkgs nixos-20.09
  pkgs-20_09 = import (builtins.fetchGit {
    name = "nixpks-20.09";
    url = "https://github.com/nixos/nixpkgs";
    ref = "refs/heads/nixos-20.09";
    rev = "1c1f5649bb9c1b0d98637c8c365228f57126f361";
  }) {};
in
pkgs.stdenv.mkDerivation {
  pname = "test";
  version = "0.1.0";
  buildInputs = [
    pkgs-20_09.mariadb
  ];
}
$ nix-shell
it just waits indefinitely without doing anything. But if I do
$ nix-shell -p mariadb -I nixpkgs=https://github.com/NixOS/nixpkgs/archive/1c1f5649bb9c1b0d98637c8c365228f57126f361.tar.gz
[...]
/nix/store/yias2v8pm9pvfk79m65wdpcby4kiy91l-mariadb-server-10.4.17
[...]
copying path '/nix/store/yias2v8pm9pvfk79m65wdpcby4kiy91l-mariadb-server-10.4.17' from 'https://cache.nixos.org'...
[nix-shell:~/Playground]$ mariadb --version
mariadb Ver 15.1 Distrib 10.4.17-MariaDB, for Linux (x86_64) using readline 5.1
it works perfectly.
What am I doing wrong in the script that makes it hang?
EDIT: I got a bit more info by running
$ nix-shell -vvv
[...]
did not find cache entry for '{"name":"nixpks-20.09","rev":"1c1f5649bb9c1b0d98637c8c365228f57126f361","type":"git"}'
did not find cache entry for '{"name":"nixpks-20.09","ref":"refs/heads/nixos-20.09","type":"git","url":"https://github.com/nixos/nixpkgs"}'
locking path '/home/test/.cache/nix/gitv3/17blyky0ja542rww32nj04jys1r9vnkg6gcfbj83drca9a862hwp.lock'
lock acquired on '/home/test/.cache/nix/gitv3/17blyky0ja542rww32nj04jys1r9vnkg6gcfbj83drca9a862hwp.lock.lock'
fetching Git repository 'https://github.com/nixos/nixpkgs'...
Is it me, or does it seem like it's trying to fetch from two different sources? As far as I understand, all three of url, rev and ref are needed for git fetching, but it looks as if it's splitting them.
EDIT2: I've been trying with fetchFromGitHub
pkgs-20_09 = import (pkgs.fetchFromGitHub {
  name = "nixpks-20.09";
  owner = "nixos";
  repo = "nixpkgs";
  rev = "1c1f5649bb9c1b0d98637c8c365228f57126f361";
  sha256 = "0f2nvdijyxfgl5kwyb4465pppd5vkhqxddx6v40k2s0z9jfhj0xl";
}) {};
and fetchTarball
pkgs-20_09 = import (builtins.fetchTarball "https://github.com/NixOS/nixpkgs/archive/1c1f5649bb9c1b0d98637c8c365228f57126f361.tar.gz") {};
and both work just fine. I'll use fetchFromGitHub from now on, but it would be interesting to know why fetchGit doesn't work.

segmentation fault pro*c code for database connection

I wrote a simple Pro*C program to check database connectivity. The code is:
int main()
{
    char *conn_string = "IDA/IDA#DBISPSS";
    int x = 10;

    printf("value of x is before db connection %d\n", x);
    printf(" conn_string %s \n", conn_string);

    EXEC SQL CONNECT :conn_string;
    EXEC SQL SELECT 1 INTO :x FROM DUAL;

    printf("value of x is %d\n", x);
    return 0;
}
I executed the following commands to create the executable (test_connection) from the Pro*C code:
proc test_connection.pc
cc -I${ORACLE_HOME}/precomp/public -c test_connection.c
cc test_connection.o -o test_connection -L$ORACLE_HOME/lib -lclntsh
and when I executed the test_connection executable, the output was:
value of x is before db connection 10
conn_string IDA/IDA#DBISPSS
Segmentation fault
But the same code works well on another Linux machine and a Solaris machine.
Why is a segmentation fault thrown?
I tested on HP-UX 11.11/Oracle 11 and it works OK. I don't see any problem, but try some changes:
Declare 'x' inside a DECLARE SECTION:
EXEC SQL BEGIN DECLARE SECTION;
    int x = 0;
EXEC SQL END DECLARE SECTION;
Try this connection command:
EXEC SQL BEGIN DECLARE SECTION;
    char *user = "abc", *password = "123", *database = "base";
EXEC SQL END DECLARE SECTION;

EXEC SQL DECLARE BASE_HANDLE DATABASE;
...
EXEC SQL CONNECT :user IDENTIFIED BY :password AT BASE_HANDLE USING :database;
...
EXEC SQL AT BASE_HANDLE SELECT 1...
Insert a printf("here 1"); between EXEC SQL CONNECT... and EXEC SQL SELECT ... to see where the SEGFAULT is thrown.
I had that problem and no amount of fiddling with my source made any difference. What finally worked was reinitializing all (ALL) my libraries to make sure that Oracle only had access to the 32-bit versions. It seems Oracle was somehow getting linked against a 64-bit library. Only removing all references to any libraries or executables except the 32-bit versions worked. This included running a 32-bit version of Pro*C.
