Last active
August 30, 2023 20:08
-
-
Save tcartwright/22b38177c51115910756465ca123b108 to your computer and use it in GitHub Desktop.
SQL SERVER: Find Inefficient Queries
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
/*
    Author: Tim Cartwright

    Finds the most expensive cached query plans on the server using
    sys.dm_exec_query_stats, and ranks them by an arbitrary "impact weight"
    so the worst offenders appear at the top of the results.

    Usage:
        1) Leave the @dbname variable empty or NULL to scan all user databases.
        2) Set the @dbname variable to a specific database name to only get the
           queries for that database.

    RETURNS: The queries with the highest plan cost and longest working time,
             with the worst offenders at the top of the list.
*/
DECLARE @dbname sysname = '',   -- '' or NULL = all user databases
        @for_excel bit = 0      -- 1 = quote text columns and suppress the XML plan so results paste cleanly into Excel
-- The showplan namespace is required so the XQuery below can shred the cached plan XML.
;WITH XMLNAMESPACES (DEFAULT 'http://schemas.microsoft.com/sqlserver/2004/07/showplan')
SELECT
    [db_name] = CAST(DB_NAME(qinfo.dbid) AS varchar(128)),
    [text] = CASE WHEN @for_excel = 1 THEN '"' + qinfo.[text] + '"' ELSE qinfo.[text] END,
    [param_select] = CASE WHEN @for_excel = 1 THEN '"' + fnParams.[param_select] + '"' ELSE fnParams.[param_select] END,
    query_plan = CASE WHEN @for_excel = 1 THEN NULL ELSE qp.query_plan END,
    [impact_weight] = CAST(fnWeight.[impact_weight] AS decimal(19, 4)),
    [highest_statement_cost] = CAST(fnCosts.HighestStatementCost AS decimal(19, 2)),
    qinfo.[execution_count],
    [executions_per_minute] = CAST(qinfo.[executions_per_minute] AS decimal(19, 4)),
    [executions_per_hour] = CAST(qinfo.[executions_per_hour] AS decimal(19, 4)),
    -- Worker/elapsed times come from the DMV in microseconds; convert to a hh:mm:ss.mmm display.
    [average_worker_time] = CONVERT(varchar(30), DATEADD(millisecond, (qinfo.[average_worker_time] / 1000.0), 0), 114), -- style 108 for no milliseconds, 114 with
    [average_execution_time] = CONVERT(varchar(30), DATEADD(millisecond, (qinfo.[average_exec_time] / 1000.0), 0), 114),
    qinfo.[average_used_threads],
    qinfo.[average_logical_reads],
    -- Reads/writes are 8 KB pages; divide by 128 to express as MB.
    [average_logical_reads_mb] = CAST((qinfo.[average_logical_reads] / 128.0) AS decimal(19, 2)),
    qinfo.[average_logical_writes],
    [average_logical_writes_mb] = CAST((qinfo.[average_logical_writes] / 128.0) AS decimal(19, 2)),
    qinfo.[max_physical_reads],
    [max_physical_reads_mb] = CAST((qinfo.[max_physical_reads] / 128.0) AS decimal(19, 2)), -- size of the query's data in memory
    -- Memory grants are reported in KB; divide by 1024 to express as MB.
    [average_used_memory_grant_mb] = CAST((qinfo.[average_used_memory_grant] / 1024.0) AS decimal(19, 2)),
    [average_memory_grant_mb] = CAST((qinfo.[average_memory_grant] / 1024.0) AS decimal(19, 2)),
    qinfo.[plan_creation_time],
    qinfo.[last_execution_time],
    qinfo.[query_hash],
    qinfo.[sql_handle],
    qinfo.[plan_handle],
    qinfo.[min_logical_reads],
    qinfo.[max_logical_reads]
FROM (
    -- Base rowset: raw plan-cache statistics plus per-execution averages.
    SELECT qs.sql_handle, qs.plan_handle, fnExecCnt.[execution_count],
        qs.total_worker_time, qs.total_logical_reads,
        qs.total_logical_writes, qt.text, fnStats.average_worker_time,
        fnStats.average_logical_reads, fnStats.average_logical_writes,
        qt.dbid, fnStats.[average_exec_time], fnStats.[average_used_memory_grant],
        fnStats.[average_memory_grant], fnExecs.[executions_per_minute],
        fnStats.[average_used_threads], qs.[query_hash],
        -- Prefer the procedure cache time when available; fall back to the plan creation time.
        [plan_creation_time] = COALESCE(fnAge.[cached_time], qs.[creation_time]),
        qs.[last_execution_time], fnExecs.[executions_per_hour],
        qs.[min_logical_reads], qs.[max_logical_reads], qs.[max_physical_reads]
    FROM sys.dm_exec_query_stats qs
    CROSS APPLY sys.dm_exec_sql_text(qs.plan_handle) as qt
    CROSS APPLY (
        -- We may have a zero logical read, so at least one of the executions did not return data for whatever reason;
        -- adjust the execution count so as to hopefully not skew the stats as much. Sadly, we cannot account for all zeros.
        SELECT [execution_count] = CASE
                WHEN qs.[execution_count] <= 1 THEN qs.[execution_count]
                WHEN qs.[min_logical_reads] = 0 THEN qs.[execution_count] - 1
                ELSE qs.[execution_count]
            END
    ) fnExecCnt
    OUTER APPLY (
        -- Plan age in minutes, taken from the procedure stats when this plan belongs to a cached procedure.
        SELECT [age_minutes] = CASE
                WHEN DATEDIFF(mi, ps.cached_time, GETDATE()) > 0 AND ps.[execution_count] > 1 THEN DATEDIFF(mi, ps.cached_time, GETDATE())
                ELSE NULL
            END,
            ps.cached_time
        FROM sys.dm_exec_procedure_stats ps WHERE ps.[plan_handle] = qs.[plan_handle]
    ) fnAge
    CROSS APPLY (
        SELECT [executions_per_minute] =
            CASE WHEN fnExecCnt.[execution_count] = 0 THEN 0
                /* we want an exec-per-minute rate, but we don't want zero when the time span is less than 1 minute, so take the exec count */
                WHEN COALESCE(fnAge.age_minutes, DATEDIFF(minute, qs.[creation_time], GETDATE()), 0) = 0 THEN fnExecCnt.[execution_count]
                ELSE (1.00 * fnExecCnt.[execution_count] / COALESCE(fnAge.age_minutes, DATEDIFF(minute, qs.[creation_time], GETDATE())))
            END,
            [executions_per_hour] =
            CASE WHEN fnExecCnt.[execution_count] = 0 THEN 0
                WHEN COALESCE(fnAge.age_minutes, DATEDIFF(hour, qs.[creation_time], GETDATE()), 0) = 0 THEN fnExecCnt.[execution_count]
                ELSE (1.00 * fnExecCnt.[execution_count] / COALESCE(fnAge.age_minutes / 60.0, DATEDIFF(hour, qs.[creation_time], GETDATE())))
            END
    ) fnExecs
    CROSS APPLY ( SELECT
            -- NULLIF guards against a divide-by-zero if the DMV ever reports a zero execution count;
            -- the averages come back NULL for such rows instead of aborting the whole query.
            [average_worker_time] = qs.total_worker_time / NULLIF(fnExecCnt.[execution_count], 0),
            [average_logical_reads] = qs.total_logical_reads / NULLIF(fnExecCnt.[execution_count], 0),
            [average_logical_writes] = qs.total_logical_writes / NULLIF(fnExecCnt.[execution_count], 0),
            [average_exec_time] = qs.total_elapsed_time / NULLIF(fnExecCnt.[execution_count], 0),
            [average_used_memory_grant] = qs.total_used_grant_kb / NULLIF(fnExecCnt.[execution_count], 0),
            [average_memory_grant] = qs.total_grant_kb / NULLIF(fnExecCnt.[execution_count], 0),
            [average_used_threads] = [qs].[total_used_threads] / NULLIF(fnExecCnt.[execution_count], 0)
    ) fnStats
    WHERE (LEN(@dbname) = 0 OR qt.dbid = DB_ID(@dbname))
        --AND qs.[execution_count] >= 2
        AND (
            fnStats.[average_worker_time] >= 5000 -- microseconds, i.e. 5 milliseconds
            OR fnStats.[average_exec_time] >= 5000 -- microseconds, i.e. 5 milliseconds
        )
        AND qt.dbid > 4 -- skip the system databases (master, tempdb, model, msdb)
        AND qt.text NOT LIKE '%Inefficient Query Plans Query%' -- leave this in place so this beast does not show in the results.
) qinfo
CROSS APPLY sys.dm_exec_query_plan(qinfo.plan_handle) AS qp
CROSS APPLY (
    -- Find the max statement cost for all the queries in the plan, as each plan can contain multiple queries with individual costs.
    SELECT [HighestStatementCost] = MAX(cast(ISNULL(stmts.cost.value('@StatementSubTreeCost', 'VARCHAR(128)'), 0) as float))
    FROM qp.query_plan.nodes('/ShowPlanXML/BatchSequence//StmtSimple') AS stmts(cost)
) fnCosts
OUTER APPLY (
    -- Extract the compiled parameters from the plan XML and build a runnable SELECT to make them easier to inspect later on.
    SELECT [param_select] = 'SELECT ' + REPLACE(STUFF((
            SELECT CONCAT(', ', t1.[Column], ' = ', REPLACE(REPLACE(t1.[Value], '(', ''), ')', ''), '##CRLF##')
            FROM (
                SELECT [Column] = CAST(stmts.stmt.value('@Column', 'varchar(8000)') AS varchar(8000)),
                    [Value] = CAST(stmts.stmt.value('@ParameterCompiledValue', 'varchar(8000)') AS varchar(8000))
                FROM qp.query_plan.nodes('/ShowPlanXML/BatchSequence/Batch/Statements//ParameterList/ColumnReference') AS stmts(stmt)
                WHERE stmts.stmt.value('count(.)', 'int') > 0
            ) t1
            GROUP BY t1.[Column], t1.[Value]
            ORDER BY REPLACE(t1.[Column], '@', '')
            FOR XML PATH('') --, ROOT('Parameters')
        ), 1, 2, ''), '##CRLF##', CONCAT(CHAR(13), CHAR(10), CHAR(9)))
) fnParams
CROSS APPLY (
    -- Come up with an arbitrary weighting system for sorting,
    -- using standard multipliers chosen to keep the larger numbers from overflowing when combined.
    SELECT [impact_weight] =
        (fnCosts.[HighestStatementCost] * 1.0)
        + (qinfo.[average_worker_time] * 1.0)
        + (qinfo.[average_logical_reads] * 10.0)
        + (qinfo.[executions_per_minute] * 10.0)
) fnWeight
WHERE fnCosts.HighestStatementCost >= 10
ORDER BY fnWeight.[impact_weight] DESC,
    fnCosts.HighestStatementCost DESC
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Typically I prefer to use Brent Ozar's sp_BlitzCache, but I cannot always install it, so in those cases I have this monster.