I have to fetch data from an XML value based on a specific condition.
The idea is to have one table with two columns, ID and Data (XML data type), and to fetch the data for a specific ID.
Here is an example. I want the result to contain only the first row (Sears Tower), but I am getting two rows.
IF OBJECT_ID('tempdb..#ExistExample') IS NOT NULL
DROP TABLE #ExistExample
GO
CREATE TABLE #ExistExample
(
XMLID Int,
XMLDocument xml
)
INSERT INTO #ExistExample
VALUES (100,'<Buildings>
<Building>
<Name>Sears Tower</Name>
<Floor1>Yes</Floor1>
<Floor2>Yes</Floor2>
<Floor3>No</Floor3>
</Building>
<Building>
<Name>IDS Building</Name>
<Floor1>Yes</Floor1>
<Floor2>Yes</Floor2>
<Floor3>Yes</Floor3>
</Building>
</Buildings>')
DECLARE @data varchar(1000)
DECLARE @ID INT
SET @ID = 101
SET @data = 'Sears Tower'
INSERT INTO #ExistExample
VALUES (101,'<Buildings>
<Building>
<Name>Sears Tower</Name>
<Floor1>Yes</Floor1>
<Floor2>Yes</Floor2>
<Floor3>No</Floor3>
</Building>
<Building>
<Name>IDS Building</Name>
<Floor1>Yes</Floor1>
<Floor2>Yes</Floor2>
<Floor3>Yes</Floor3>
</Building>
</Buildings>')
--SELECT * FROM #ExistExample
SELECT
c.value('(Name/text())[1]','varchar(25)') AS BuildingName,
c.value('(Floor1/text())[1]','varchar(25)') AS Floor1,
c.value('(Floor2/text())[1]','varchar(25)') AS Floor2,
c.value('(Floor3/text())[1]','varchar(25)') AS Floor3
FROM #ExistExample
CROSS APPLY XMLDocument.nodes('/Buildings/Building') as t(c)
WHERE c.exist('//Building/Name[.=sql:variable("@data")]') = 1
AND XMLID = @ID
Use:
DriverDetails/DriverDetail[ID eq 1]/*[not(self::ID)]
or, if you consider this simpler:
DriverDetails/DriverDetail[ID eq 1]/(PRN | Name)
Both XPath expressions assume that the initial context node is the parent of DriverDetails.
Got the answer. It should be like this:
SELECT c.value('(Name/text())[1]','varchar(25)') AS BuildingName,
c.value('(Floor1/text())[1]','varchar(25)') AS Floor1,
c.value('(Floor2/text())[1]','varchar(25)') AS Floor2,
c.value('(Floor3/text())[1]','varchar(25)') AS Floor3
FROM #ExistExample
CROSS APPLY XMLDocument.nodes('/Buildings/Building') as t(c)
WHERE c.value('(Name/text())[1]','varchar(25)') = @data
AND XMLID = @ID
I am not sure if this is the best way or whether there is another way to achieve this.
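The original query returns both rows because the leading // in c.exist('//Building/Name[...]') searches the whole document rather than the current Building node, so the predicate is true for every row produced by nodes(). A minimal alternative sketch, assuming the same temp table and variables as above: push the name filter into the nodes() path (or drop the // and test the current node's Name) so each Building is tested against its own Name.
-- Filter inside the path expression: only Building nodes whose Name matches @data are shredded
SELECT c.value('(Name/text())[1]','varchar(25)') AS BuildingName,
       c.value('(Floor1/text())[1]','varchar(25)') AS Floor1,
       c.value('(Floor2/text())[1]','varchar(25)') AS Floor2,
       c.value('(Floor3/text())[1]','varchar(25)') AS Floor3
FROM #ExistExample
CROSS APPLY XMLDocument.nodes('/Buildings/Building[Name=sql:variable("@data")]') AS t(c)
WHERE XMLID = @ID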
I'm trying to replace a placeholder string inside a selection of 10 random records with a random string (a name) taken from another table, using only SQLite statements.
I've used a subquery so that replace() swaps the placeholder for the result of that subquery. I thought each subquery invocation would load a random name from the names table, but I've found that is not the case: every placeholder is replaced with the same string.
select id, replace(snippet, '%NAME%',
         (select name from names
          where gender = 'male'
          order by random() limit 1)
       ) as snippet
from imagedata
where timestamp is not NULL
order by random()
limit 10
I was expecting each row of the SELECT to get a different random replacement every time the subquery is invoked:
hello i'm %NAME% and this is my house
This is the car of %NAME%, let me know what you think
Instead, each row gets the same replacement:
hello i'm david and this is my house
This is the car of david, let me know what you think
and so on...
I'm not sure whether this can be done inside SQLite or whether I have to do it in PHP with two separate database queries.
Thanks in advance!
It seems that random() in the subquery is only evaluated once.
Try this:
select
    i.id,
    replace(i.snippet, '%NAME%', n.name) snippet
from (
    -- 10 random snippets, each tagged with a random 1-based position into the list of male names
    select
        id,
        snippet,
        abs(random()) % (select count(*) from names where gender = 'male') + 1 num
    from imagedata
    where timestamp is not NULL
    order by random() limit 10
) i inner join (
    -- every male name with its 1-based alphabetical position
    select
        n.name,
        (select count(*) from names where name < n.name and gender = 'male') + 1 num
    from names n
    where gender = 'male'
) n on n.num = i.num
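On SQLite 3.25 or later, a similar pairing can be written with window functions instead of the correlated count. A minimal sketch under that assumption: it picks 10 random snippets and numbers the male names in random order, then pairs them up by row number, so (unlike the query above) each snippet gets a distinct name.
select i.id,
       replace(i.snippet, '%NAME%', n.name) as snippet
from (
    -- 10 random snippets, numbered 1..10
    select id, snippet, row_number() over (order by random()) as rn
    from (select id, snippet
          from imagedata
          where timestamp is not null
          order by random()
          limit 10) as s
) i
join (
    -- male names in random order, numbered 1..N
    select name, row_number() over (order by random()) as rn
    from names
    where gender = 'male'
) n on n.rn = i.rn;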
I have a string like this: ('car, bus, train')
I want to convert it to be used in an IN clause. Basically, I want to convert it to
('car','bus','train'). How do I do this in Teradata?
I don't know how you are getting data like that, but if you have no control over it, you can use STRTOK_SPLIT_TO_TABLE.
select t.* from table (strtok_split_to_table(1,'car, bus, train',',')
returns (outkey integer,tokennum integer,resultstring varchar(25))) as t
Run by itself, that gives you:
outkey  tokennum  resultstring
1       1         car
1       2         bus
1       3         train
You can use that as a derived table and join it to the table you want to filter by. Something like:
select
<your table>.*
from
<your table>
inner join (select t.* from table (strtok_split_to_table(1,'car, bus, train',',')
returns (outkey integer,tokennum integer,resultstring varchar(25))) as t) dt
on yourtable.yourcolumn = dt.resultstring
Here is another way of splitting the input for any number of commas and using the result in an IN clause.
SELECT regexp_substr('car,bus,train','[^,]+',1,day_of_calendar) fields
FROM sys_calendar.calendar
WHERE day_of_calendar <= (CHAR('car,bus,train') - CHAR(oreplace('car,bus,train',',','')))+1;
Output of the query:
fields
~~~~~~~~
bus
car
train
Here is the syntax to use it in a WHERE clause:
SELECT * FROM <your table>
WHERE yourtable.requiredColumn in
(
SELECT regexp_substr('car,bus,train','[^,]+',1,day_of_calendar) fields
FROM sys_calendar.calendar
WHERE
day_of_calendar <= (CHAR('car,bus,train') - CHAR(oreplace('car,bus,train',',','')))+1
);
Basically, what we are doing here is splitting the string at each comma. The expression below calculates the number of tokens (the number of commas plus one) in the string:
(CHAR('car,bus,train') - CHAR(oreplace('car,bus,train',',','')))+1
I'm searching for multiple terms in multiple columns of a virtual table. I have checked this thread, which searches for a single word in multiple columns.
I tried the following:
SELECT * FROM table WHERE table MATCH (('A:cat OR C:cat') AND ('A:dog OR C:dog'))
but it seems the AND condition is not working.
EDIT: I have tried the following:
Select count (*) FROM Table1 WHERE TBL_VIRTUAL MATCH (('A:D* AND B:D* AND C:D*') OR ('A:tar* AND B:tar* AND C:tar*'));
Select count (*) FROM Table1 WHERE TBL_VIRTUAL MATCH (('A:D* AND B:D* AND C:D*') AND ('A:tar* AND B:tar* AND C:tar*'));
Both queries return the same 109 results. Then I tried what @redneb mentions in the answer below:
SELECT * FROM table WHERE table MATCH '(A:D* OR B:D* OR C: D*) AND (A:tar* OR B:tar* OR C:tar*)'
SELECT * FROM table WHERE table MATCH '(A:D* OR B:D* OR C: D*) OR (A:tar* OR B:tar* OR C:tar*)'
But these return 0 results.
Any suggestions on what I'm missing here?
Try this instead:
SELECT *
FROM mytable
WHERE mytable MATCH '(A:cat OR C:cat) AND (A:dog OR C:dog)';
However, I suspect that the following query will perform faster:
SELECT *
FROM mytable
WHERE mytable MATCH '(A:cat AND C:dog) OR (A:dog AND C:cat)';
and is equivalent to the first one.
Edit: Here's a complete example. Let's create and populate a table first:
CREATE VIRTUAL TABLE mytable USING fts3(A, C);
INSERT INTO mytable VALUES
('foo','bar'),
('dog','dog'),
('cat','cat'),
('dog','cat'),
('cat','dog');
Then the query works as expected:
sqlite> SELECT * FROM mytable WHERE mytable MATCH '(A:cat AND C:dog) OR (A:dog AND C:cat)';
A C
---------- ----------
dog cat
cat dog
For an OR condition, type OR between the terms, e.g. MATCH ('A:cat OR C:cat').
For an AND condition, just don't type anything between them, e.g. MATCH ('A:cat C:cat').
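An illustrative sketch against the mytable example above (the column names A and C come from that example):
SELECT * FROM mytable WHERE mytable MATCH 'A:cat C:dog';    -- implicit AND: column A contains cat and column C contains dog
SELECT * FROM mytable WHERE mytable MATCH 'A:cat OR C:dog'; -- OR: either column matches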
I have this query
Select distinct p_id, p_date,p_city
from p_master
where p_a_id in(1,2,5,8,2,1,10,02)
and my IN clause contains 200 values. How do I find out which ones weren't returned by the query? Each value in the IN clause may have a record, but in some cases it doesn't. I want to know all the values for which no record was found.
Please help
This will do the trick but I'm sure there's an easier way to find this out :-)
with test1 as (
  select '1,2,5,8,2,1,10,02' str from dual
)
select *
from (
  select trim(x.column_value.extract('e/text()')) cols
  from test1 t,
       table (xmlsequence(xmltype('<e><e>' || replace(t.str, ',', '</e><e>') || '</e></e>').extract('e/e'))) x
) cols
left outer join (
  select count(*), p_a_id
  from p_master
  where p_a_id in (1,2,5,8,2,1,10,02)
  group by p_a_id
) p on p.p_a_id = cols.cols
where p_a_id is null;
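A simpler sketch along the same lines, assuming you can list the values once as rows (Oracle syntax; the values shown are just the ones from the question):
with ids as (
  select 1 p_a_id from dual union all
  select 2 from dual union all
  select 5 from dual union all
  select 8 from dual union all
  select 10 from dual
  -- ... one row per value in the IN list
)
select i.p_a_id               -- values with no matching row in p_master
from ids i
left join p_master p on p.p_a_id = i.p_a_id
where p.p_a_id is null;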
I have a column in a table; the column name is items and it contains values like this:
itemID  items
1       school,college
2       place, country
3       college,cricket
4       School,us,college
5       cricket,country,place
6       football,tennis,place
7       names,tennis,cricket
8       sports,tennis
Now I need to write a search query.
For example, if the user types 'cricket' into a textbox and clicks the button, I need to check the items column for cricket.
In the table I have 3 rows with cricket in the items column (ItemId = 3, 5, 7).
If the user types tennis,cricket then I need to get the records that match either one, so I need to get 5 rows (ItemId = 3, 5, 6, 7, 8).
How do I write a query for this requirement?
You need to start by redesigning your database, as this is a very bad structure. You should NEVER store a comma-delimited list in a field. First think about what fields you need and then design a proper database.
The very bad structure of this table (holding multiple values in one column) is the reason you are facing this issue. Your best option is to normalize the table.
But if you can't, then you can use the LIKE operator with a wildcard:
Select * From Table
Where items Like '%cricket%'
or
Select * From Table
Where items Like '%cricket%'
or items Like '%tennis%'
You will need to dynamically construct these SQL queries from the input the user makes. The other alternative is to write code on the server to turn the comma-delimited list of parameters into a table variable or temp table and then join to it, as sketched below.
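A minimal sketch of that alternative on SQL Server 2016 or later, where the built-in STRING_SPLIT function is available (the table name MyTable is illustrative, matching the other answers; older versions would need a splitter UDF like the one shown further down):
DECLARE @search varchar(200) = 'tennis,cricket';

SELECT DISTINCT t.itemID, t.items
FROM MyTable t
JOIN STRING_SPLIT(@search, ',') s
  ON t.items LIKE '%' + LTRIM(RTRIM(s.value)) + '%';  -- trim in case the user types spaces after commas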
Delimited values in columns is almost always a bad table design. Fix your table structure.
If for some reason you are unable to do that, the best you can hope for is this:
SELECT * FROM [MyTable] WHERE items LIKE '%CRICKET%'
This is still very bad, for two important reasons:
Correctness. It would also return values that merely contain the search term as part of a longer item. Using your tennis example, what if you also had a "tennis shoes" item?
Performance. It's not sargable, which means the query won't work with any indexes you may have on that column. That means your query will probably be incredibly slow.
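To illustrate the sargability point (a hedged aside, assuming an index exists on the items column): only a pattern without a leading wildcard can use an index seek, and that is not an option here because the search term can appear anywhere in the list.
-- Leading wildcard: the whole table (or index) must be scanned
SELECT * FROM [MyTable] WHERE items LIKE '%CRICKET%'
-- Prefix-only pattern: an index on items could be used for a seek,
-- but it would miss rows such as 'college,cricket'
SELECT * FROM [MyTable] WHERE items LIKE 'CRICKET%'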
If you need help fixing this structure, the solution is to add another table (we'll call it TableItems) with a column for your ItemID, which will be a foreign key to your original table, and an Item field (singular) for each of your item values. Then you can join to that table and match a column value exactly. If these items work more like categories, where you want all rows with the "Cricket" item to reference the same cricket row, you also want a third table acting as an intersection between your original table and the one just described.
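A minimal sketch of that normalized layout (table and column names are illustrative, following the TableItems name used above):
CREATE TABLE TableItems (
    ItemID int         NOT NULL,   -- foreign key to the original table
    Item   varchar(50) NOT NULL
);

-- Exact matches instead of wildcard scans:
SELECT DISTINCT t.ItemID
FROM MyTable t
JOIN TableItems ti ON ti.ItemID = t.ItemID
WHERE ti.Item IN ('tennis', 'cricket');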
For a single item:
SELECT itemID, items FROM MyTable WHERE items LIKE '%cricket%'
For multiple items:
SELECT itemID, items FROM MyTable WHERE items LIKE '%tennis%' or items LIKE '%cricket%'
You'll need to parse the input, split it up, and add each item to the query:
items LIKE '%item1%' or items LIKE '%item2%' or items LIKE '%item3%' ...
I think that, in the interest of data validity, it should be normalized so that you split the items into a separate table with one item per row.
In either case, here is a working sample that uses a user-defined function to split the incoming string into a table variable and then uses a JOIN with LIKE:
CREATE FUNCTION dbo.udf_ItemParse
(
    @Input VARCHAR(8000),
    @Delimeter char(1) = '|'
)
RETURNS @ItemList TABLE
(
    Item VARCHAR(50),
    Pos  int
)
AS
BEGIN
    DECLARE @Item varchar(50)
    DECLARE @StartPos int, @Length int
    DECLARE @Pos int
    SET @Pos = 0

    -- Walk through the string, peeling off one delimited item per iteration
    WHILE LEN(@Input) > 0
    BEGIN
        SET @StartPos = CHARINDEX(@Delimeter, @Input)
        IF @StartPos < 0 SET @StartPos = 0
        SET @Length = LEN(@Input) - @StartPos - 1
        IF @Length < 0 SET @Length = 0
        IF @StartPos > 0
        BEGIN
            -- Delimiter found: take the text before it and keep the rest for the next pass
            SET @Pos = @Pos + 1
            SET @Item = SUBSTRING(@Input, 1, @StartPos - 1)
            SET @Input = SUBSTRING(@Input, @StartPos + 1, LEN(@Input) - @StartPos)
        END
        ELSE
        BEGIN
            -- No delimiter left: the remainder is the last item
            SET @Pos = @Pos + 1
            SET @Item = @Input
            SET @Input = ''
        END
        INSERT @ItemList (Item, Pos) VALUES (@Item, @Pos)
    END
    RETURN
END
GO

DECLARE @Itemstable TABLE
(
    ItemId INT,
    Items  VarChar(1000)
)

INSERT INTO @Itemstable
SELECT 1 itemID, 'school,college' items UNION
SELECT 2, 'place, country' UNION
SELECT 3, 'college,cricket' UNION
SELECT 4, 'School,us,college' UNION
SELECT 5, 'cricket,country,place' UNION
SELECT 6, 'football,tennis,place' UNION
SELECT 7, 'names,tennis,cricket' UNION
SELECT 8, 'sports,tennis'

DECLARE @SearchParameter VarChar(100)

SET @SearchParameter = 'cricket'

SELECT DISTINCT ItemsTable.*
FROM @Itemstable ItemsTable
INNER JOIN dbo.udf_ItemParse(@SearchParameter, ',') udf
    ON ItemsTable.Items LIKE '%' + udf.Item + '%'

SET @SearchParameter = 'cricket,tennis'

SELECT DISTINCT ItemsTable.*
FROM @Itemstable ItemsTable
INNER JOIN dbo.udf_ItemParse(@SearchParameter, ',') udf
    ON ItemsTable.Items LIKE '%' + udf.Item + '%'
Why exactly are you using a database in the first place?
I mean: you are clearly not using its potential. If you like using comma-separated stuff, try a file.
In MySQL, create a fulltext index on your table:
CREATE FULLTEXT INDEX fx_mytable_items ON mytable (items)
and issue this query:
SELECT *
FROM mytable
WHERE MATCH(items) AGAINST ('cricket tennis' IN BOOLEAN MODE)