Warning: Undefined array key "HTTP_REFERER" in /www/wwwroot/prod/www.enjoyasp.net/wp-content/plugins/google-highlight/google-hilite.php on line 58
--1,VS新建数据库项目,,添加新项-sqlserverclr #,添加自定义函数,
--函数前标记 [Microsoft.SqlServer.Server.SqlFunction]
--可直接右建部署。生成后在debug文件夹下会有sql执行的部署文件
--选择一数据库,便于调试与部署测试
--注:vs并不允许直接添加第三方引用,可参考此种方式:http://updates.sqlservervideos.com/2010/07/adding-references-to-sql-clr-projects.html
--注意,64位的sqlserver不支持32位的dll 

-- Enable CLR integration at the instance level (required before any
-- CLR assembly/function can run)
sp_configure 'clr enabled', 1;
RECONFIGURE;
 
-- When deploying, if your assembly references other DLLs, register those
-- DLLs in SQL Server first, then create your own assembly.
-- Register the referenced assembly:
ALTER DATABASE [master] SET TRUSTWORTHY ON
GO
CREATE ASSEMBLY [System.Net.Http]
AUTHORIZATION [dbo]
FROM
'C:\Program Files\Reference Assemblies\Microsoft\Framework\v3.5\System.Net.Http.dll' -- copy the DLL there first if it is missing
WITH PERMISSION_SET = UNSAFE

-- Deployment
-- 1. Create the assembly
-- Add from a file path:
CREATE ASSEMBLY [SqlServerProject1]
FROM N'I:\ITshare\SqlServerProject1.dll'
-- ...or add the assembly directly from its binary image
-- (the 0x... literal can be taken from the generated deployment script)
CREATE ASSEMBLY [MySafe]
    AUTHORIZATION [dbo]
 FROM 0x4D5A900003000000040000....
WITH PERMISSION_SET = UNSAFE;

-- 2. Map a T-SQL function onto the CLR method
CREATE FUNCTION [dbo].[Csharp]
(@code NVARCHAR (4000))
RETURNS NVARCHAR (4000)
AS
 EXTERNAL NAME [CLR].[UserDefinedFunctions].[Csharp]
GO
-- Removal: drop the function before dropping the assembly it depends on
DROP FUNCTION Decrypto
DROP ASSEMBLY MySafe

-- Querying the catalog views
/*
TA = assembly (CLR-integration) trigger      CLR_TRIGGER
FS = assembly (CLR) scalar function          CLR_SCALAR_FUNCTION
FT = assembly (CLR) table-valued function    CLR_TABLE_VALUED_FUNCTION
PC = assembly (CLR) stored procedure         CLR_STORED_PROCEDURE
*/

-- List every CLR object together with its assembly, class, and method
SELECT s.type_desc AS 类型, s.name AS 名称, f.name AS 程序集, m.assembly_class AS 类名, m.assembly_method AS 方法名
FROM sys.objects s
JOIN sys.assembly_modules m ON s.object_id = m.object_id
JOIN sys.assemblies f ON m.assembly_id = f.assembly_id
WHERE s.type_desc LIKE 'clr%'
ORDER BY s.type_desc

-- Installed assemblies
SELECT TOP 10 *
FROM sys.assemblies

-- Files that belong to an assembly
SELECT TOP 10 *
FROM sys.assembly_files
WHERE assembly_id = 65537
SELECT TOP 10 *
FROM sys.assembly_references

-- Functions mapped onto each assembly
SELECT TOP 10 *
FROM sys.assembly_modules
SELECT dbo.Decrypto('UW7EcyjrWA2Qanp9nXmjJw==')
SELECT dbo.CLR_Decrypto('F0+zdAqmYv51nUL4fY5Z/w==')
SELECT dbo.CLR_Encrypto('aaa')


Warning: Undefined array key "HTTP_REFERER" in /www/wwwroot/prod/www.enjoyasp.net/wp-content/plugins/google-highlight/google-hilite.php on line 58
-- SQL Server's MONEY type formats values with thousands separators
-- (CONVERT style 1), which can be reused to comma-format an INT.
DECLARE @num INT
SET @num = 4564654
SELECT REPLACE(CONVERT(VARCHAR(30), CAST(@num AS MONEY), 1), '.00', '') -- 4,564,654


Warning: Undefined array key "HTTP_REFERER" in /www/wwwroot/prod/www.enjoyasp.net/wp-content/plugins/google-highlight/google-hilite.php on line 58

-- Conversion from INT to HEX
SELECT CONVERT(VARBINARY(8), 256)

-- Conversion from HEX to INT
SELECT CONVERT(INT, 0x00000100)

-- Implicit conversion: multiplying a binary literal by 1 yields an INT
SELECT 0x00000100*1 HextoInt

-- from: http://blog.sqlauthority.com/2012/07/26/sql-server-answer-how-to-convert-hex-to-decimal-or-int/
 


Warning: Undefined array key "HTTP_REFERER" in /www/wwwroot/prod/www.enjoyasp.net/wp-content/plugins/google-highlight/google-hilite.php on line 58

在job 运行代理步骤中加入:
-OutputVerboseLevel 4 -Output C:\TEMP\mergeagent.log
运行复制job即可

引自:
http://webcache.googleusercontent.com/search?q=cache:9mOGrpV0zukJ:www.mssqltips.com/sqlservertip/1679/getting-additional-error-messaging-information-for-sql-server-replication/+&cd=3&hl=en&ct=clnk&client=firefox-a

参数:
http://msdn.microsoft.com/en-us/library/ms147839.aspx


Warning: Undefined array key "HTTP_REFERER" in /www/wwwroot/prod/www.enjoyasp.net/wp-content/plugins/google-highlight/google-hilite.php on line 58
-- Generates 'insert into ...' scripts for every table whose name matches
-- @tablename_mask (legacy script: relies on sysobjects/syscolumns and
-- pre-2005 numeric type ids).
--
-- @tablename_mask : LIKE pattern for table names; NULL means ALL tables.
--
-- For each matching table the procedure builds a dynamic SELECT that emits
-- one INSERT statement per data row, then EXECs it. Tables with an identity
-- column are bracketed by SET IDENTITY_INSERT ON/OFF lines so the generated
-- script reproduces identical content.
create procedure sp_generate_insert_script
                 @tablename_mask varchar(30) = NULL
as
begin
  declare @tablename       varchar (128)
  declare @tablename_max   varchar (128)
  declare @tableid         int
  declare @columncount     numeric (7,0)
  declare @columncount_max numeric (7,0)
  declare @columnname      varchar (30)  -- NOTE(review): truncates column names longer than 30 chars
  declare @columntype      int
  declare @string          varchar (30)  -- NOTE(review): declared but never used
  declare @leftpart        varchar (max)
  declare @rightpart       varchar (max)
  declare @hasident        int

  set nocount on

  -- take ALL tables when no mask is given (!)
  if (@tablename_mask is NULL)
  begin
    select @tablename_mask = '%'
  end

  -- create table columninfo now, because it will be used several times

  create table #columninfo
  (num      numeric (7,0) identity,
   name     varchar(30),
   usertype smallint)

  -- candidate tables: user ('U') and system ('S') tables matching the mask
  select name,
         id
    into #tablenames
    from sysobjects
   where type in ('U' ,'S')
     and name like @tablename_mask

  -- loop through the table #tablenames

  select @tablename_max  = MAX (name),
         @tablename      = MIN (name)
    from #tablenames

  while @tablename <= @tablename_max
  begin
    select @tableid   = id
      from #tablenames
     where name = @tablename

    if (@@rowcount <> 0)
    begin
      -- Find out whether the table contains an identity column
      -- (0x80 is the identity bit in syscolumns.status)
      select @hasident = max( status & 0x80 )
        from syscolumns
       where id = @tableid

      truncate table #columninfo

      insert into #columninfo (name,usertype)
      select name, type
        from syscolumns C
       where id = @tableid
         and type <> 37            -- do not include timestamps

      -- Fill @leftpart with the first part of the desired insert-statement, with the fieldnames

      select @leftpart = 'select ''insert into '+@tablename
      select @leftpart = @leftpart + '('

      select @columncount     = MIN (num),
             @columncount_max = MAX (num)
        from #columninfo
      while @columncount <= @columncount_max
      begin
        select @columnname = name,
               @columntype = usertype
          from #columninfo
         where num = @columncount
        if (@@rowcount <> 0)
        begin
          -- last column closes the column list; earlier ones get a comma
          if (@columncount < @columncount_max)
          begin
            select @leftpart = @leftpart + @columnname + ','
          end
          else
          begin
            select @leftpart = @leftpart + @columnname + ')'
          end
        end

        select @columncount = @columncount + 1
      end

      select @leftpart = @leftpart + ' values('''

      -- Now fill @rightpart with the statement to retrieve the values of the fields, correctly formatted

      select @columncount     = MIN (num),
             @columncount_max = MAX (num)
        from #columninfo

      select @rightpart = ''

      while @columncount <= @columncount_max
      begin
        select @columnname = name,
               @columntype = usertype
          from #columninfo
         where num = @columncount

        if (@@rowcount <> 0)
        begin

          -- legacy type ids 39/47: (var)char — quote the value and double any embedded quotes
          if @columntype in (39,47)
          begin
            select @rightpart = @rightpart + '+'
            select @rightpart = @rightpart + 'ISNULL(' + replicate( char(39), 4 ) + '+replace(' + @columnname + ',' +

replicate( char(39), 4 ) + ',' + replicate( char(39), 6) + ')+' + replicate( char(39), 4 ) + ',''NULL'')'
          end

          -- legacy type id 35: text — convert to varchar(1000) first, then quote
          else if @columntype = 35

          begin
            select @rightpart = @rightpart + '+'
            select @rightpart = @rightpart + 'ISNULL(' + replicate( char(39), 4 ) + '+replace(convert(varchar(1000),' +

@columnname + ')' + ',' + replicate( char(39), 4 ) + ',' + replicate( char(39), 6 ) + ')+' + replicate( char(39), 4 ) +

',''NULL'')'
          end

          -- legacy type ids 58/61/111: presumably the datetime family — TODO confirm; quoted after convert
          else if @columntype in (58,61,111)
          begin
            select @rightpart = @rightpart + '+'
            select @rightpart = @rightpart + 'ISNULL(' + replicate( char(39), 4 ) + '+convert(varchar(20),' + @columnname +

')+'+ replicate( char(39), 4 ) + ',''NULL'')'
          end

          -- everything else (numeric-like types): emit unquoted
          else
          begin
            select @rightpart = @rightpart + '+'
            select @rightpart = @rightpart + 'ISNULL(convert(varchar(99),' + @columnname + '),''NULL'')'
          end


          if ( @columncount < @columncount_max)
          begin
            select @rightpart = @rightpart + '+'','''
          end

        end
        select @columncount = @columncount + 1
      end

    end

    -- NOTE(review): this runs even when the id lookup above matched no row;
    -- @leftpart/@rightpart would then still hold the previous table's text
    select @rightpart = @rightpart + '+'')''' + ' from ' + @tablename

    -- Order the select-statements by the first column so you have the same order for
    -- different database (easy for comparisons between databases with different creation orders)
    select @rightpart = @rightpart + ' order by 1'

    -- For tables which contain an identity column we turn identity_insert on
    -- so we get exactly the same content

    if @hasident > 0
       select 'SET IDENTITY_INSERT ' + @tablename + ' ON'

    exec ( @leftpart + @rightpart )

    if @hasident > 0
       select 'SET IDENTITY_INSERT ' + @tablename + ' OFF'

    -- advance to the next table name in alphabetical order
    select @tablename      = MIN (name)
      from #tablenames
     where name            > @tablename
  end

end


Warning: Undefined array key "HTTP_REFERER" in /www/wwwroot/prod/www.enjoyasp.net/wp-content/plugins/google-highlight/google-hilite.php on line 58
--NEWID() guarantees a fresh random value on every call
--CHECKSUM converts it to an INT, which may be negative
--RAND maps it to a positive number that is large enough
--converting to BIGINT prevents scientific (e+) notation in the output
--random integer: SELECT CHECKSUM(NEWID()); random 1..n: abs(CHECKSUM(NEWID()))%n + 1
--random float: rand(CHECKSUM(NEWID()))
--random number with a fixed digit count: SELECT LEFT(CONVERT(VARCHAR(20),CONVERT(BIGINT,RAND(CHECKSUM(NEWID())) * 1e+16)) + '0000',16)

SELECT TOP 10 LEFT(r + '0000',16)
FROM sys.all_columns a
CROSS JOIN sys.all_columns b
CROSS APPLY( SELECT CONVERT(VARCHAR(20),CONVERT(BIGINT,RAND(CHECKSUM(NEWID())) * 1e+16)) r )  m

SELECT TOP 2000 rCode -- de-duplicated
 FROM
(
    SELECT DISTINCT rCode
    FROM (
        SELECT  LEFT(r,6) rCode
        FROM sys.all_columns a
        CROSS APPLY( SELECT ABS(CHECKSUM(NEWID())) r )  m
    )M
    --WHERE NOT EXISTS( SELECT 1 FROM CouponBasicInfo cbi WHERE cbi.CouponNo = m.CouponNo )
) N

注意:abs(checksum(newid()))会出现重复,见下例:
          --正负
          SELECT CHECKSUM('22699D09-DBF8-4147-996A-16FF92A5EBFB')
          SELECT CHECKSUM('97293ABF-B3A8-4427-BE65-21F3A92EAE2A')
          
          --双负
          SELECT CHECKSUM('9F5BE5EA-D4C7-4207-AAB0-046B35795557')
          SELECT CHECKSUM('16A51019-E786-4FDD-B661-8C9353B20053')
          
          --双正
          SELECT CHECKSUM('E60F036B-86FB-48D0-AD09-DB0B7E6091EC')
          SELECT CHECKSUM('9106BB7D-7228-4649-8511-0C96780F1917')
参考:http://www.sqlservercentral.com/articles/Data+Generation/87901/





Warning: Undefined array key "HTTP_REFERER" in /www/wwwroot/prod/www.enjoyasp.net/wp-content/plugins/google-highlight/google-hilite.php on line 58

正如”随机数的生成过程非常重要因此我们不能对其视而不见”(引自Robert R -橡树岭国家实验室),我们希望占用您一点点宝贵的时间在基于SQL Server MVP Jeff Moden的 成果的基础上完成这项工作。对于使用SQL来产生随机数来说,我们会重点讲解从均匀分布随机数(non-uniformly distributed random numbers)的基础上生成非均匀分布随机数(uniformly distributed random numbers);包括一些统计分布的基础来帮你起步。

    正如我们所知,随机数在仿真中非常重要(尤其是蒙特卡洛仿真法),还有随机数在密码学以及其它高科技领域中也扮演了同样重要的角色。除此之外在我们的SQL Server中有时也需要产生大量的随机数据来测试SQL的性能。

    因为我并不是统计学家,因此我们这里仅仅来看用SQL生成并且能显而易见的看出其随机性的的随机数字,而并不会深入到数学原理来看这个随机性是真正的“随机”还是“貌似随机”我们的意图是文章中算法的正确性以及这个算法在非关键领域是否足够使用。

   通常来说,由均匀随机数转换成非均匀随机数的技术是将均匀随机数乘以累计分布函数(CDF)对于目标数据的反转。但在实践中,累计分布函数是否针对特定分布存在有效哪怕是接近的函数并不好估计。但幸运的是,比我们聪明许多的那帮家伙已经分析过了多个领域的多种分布函数我们可以直接拿来使用,这些函数可以满足我们的大多数需求。

 

测试工具

    在我们的测试中,我们采用标准的SQL技术来使用伪URN(均匀分布随机数)函数生成Float类型的参数传给转换函数.,我们将使用标量(Scalar)函数包括SCHEMABINDING关键字解决性能问题然而,或许你还想使用同等的表值函数来测试性能是否还可以进一步提升。首先,来生成测试数据。

   

-- Data definition and setup
-- Parameters for each target distribution, plus three table-valued
-- Distribution variables holding (EventID, EventProb, CumProb) rows.
-- CumProb must rise monotonically to 1.0 — RN_MULTINOMIAL depends on it.
DECLARE @NumberOfRNs   INT
       ,@Lambda        FLOAT     -- For the Poisson NURNs
       ,@GaussianMean  FLOAT     -- For the Normal NURNs
       ,@GaussianSTDEV FLOAT
       ,@LambdaEXP     FLOAT     -- For the Exponential NURNs
       ,@WeibullAlpha  FLOAT     -- For the Weibull NURNs
       ,@WeibullBeta   FLOAT
       ,@Laplaceu      FLOAT     -- For the Laplace NURNs
       ,@Laplaceb      FLOAT

SELECT @NumberOfRNs    = 10000
      ,@Lambda         = 4.0     -- Lambda for the Poisson Distribution
      ,@GaussianMean   = 5       -- Mean for the Normal Distribution
      ,@GaussianSTDEV  = 1.5     -- Standard Deviation for the Normal Distribution
      ,@LambdaEXP      = 1.5     -- Lambda for the Exponential Distribution
      ,@WeibullAlpha   = 1.0     -- Alpha (scale) for the Weibull Distribution
      ,@WeibullBeta    = 1.5     -- Beta (shape) for the Weibull Distribution
      ,@Laplaceu       = 4.0     -- Mu (location) for the Laplace Distribution
      ,@Laplaceb       = 1.0     -- Beta (scale) for the Laplace Distribution

-- NOTE(review): the CREATE TYPE below must be run once (uncommented) before
-- the Distribution table variables can be declared.
--CREATE TYPE Distribution AS TABLE (EventID INT, EventProb FLOAT, CumProb FLOAT)

DECLARE @Binomial      AS Distribution
    ,@DUniform         AS Distribution
    ,@Multinomial      AS Distribution

-- Simulate a coin toss with a Binomial Distribution
INSERT INTO @Binomial
SELECT 0, 0.5, 0.5 UNION ALL SELECT 1, 0.5, 1.0

-- Events returned by this Discrete Uniform distribution are the 6
-- Fibonacci numbers starting with the second occurrence of 1
INSERT INTO @DUniform
SELECT 1, 1./6., 1./6. UNION ALL SELECT 2, 1./6., 2./6.
UNION ALL SELECT 3, 1./6., 3./6. UNION ALL SELECT 5, 1./6., 4./6.
UNION ALL SELECT 8, 1./6., 5./6. UNION ALL SELECT 13, 1./6., 1.

-- Events returned by this Multinomial distribution are the 5
-- Mersenne primes discovered in 1952 by Raphael M. Robinson
INSERT INTO @Multinomial
SELECT 521, .10, .10 UNION ALL SELECT 607, .25, .35 UNION ALL SELECT 1279, .30, .65
UNION ALL SELECT 2203, .15, .80 UNION ALL SELECT 2281, .2, 1.

            (译者注:我将@NumberOfRNs 参数由100W改成1W,因为100W在测试中占用的时间过长,如果你喜欢,可以随意改动。)

   下面是测试工具,为了我们期望的目标分布生成NURNs(非均匀分布随机数):

-- Create random numbers for the selected distributions.
-- The CROSS APPLY of sys.all_columns with itself just manufactures enough
-- rows; the final CROSS APPLY produces one fresh uniform(0,1) value (URN)
-- per row, which every converter function consumes.
SELECT TOP (@NumberOfRNs)
     RandomUniform     = URN
    --,RandomPoisson     = dbo.RN_POISSON(@Lambda, URN)
    ,RandomBinomial    = dbo.RN_MULTINOMIAL(@Binomial, URN)
    ,RandomDUniform    = dbo.RN_MULTINOMIAL(@DUniform, URN)
    ,RandomMultinomial = dbo.RN_MULTINOMIAL(@Multinomial, URN)
    -- Box-Muller needs a second independent uniform, generated inline:
    ,RandomNormal      = dbo.RN_NORMAL(@GaussianMean, @GaussianSTDEV, URN, RAND(CHECKSUM(NEWID())))
    ,RandomExponential = dbo.RN_EXPONENTIAL(@LambdaEXP, URN)
    ,RandomWeibull     = dbo.RN_WEIBULL(@WeibullAlpha, @WeibullBeta, URN)
    ,RandomLaplace     = dbo.RN_LAPLACE(@Laplaceu, @Laplaceb, URN)
 INTO #MyRandomNumbers
 FROM sys.all_columns a1 CROSS APPLY sys.all_columns a2
 CROSS APPLY (SELECT RAND(CHECKSUM(NEWID()))) URN(URN)

 

       接下来,我们将会逐个介绍每种分布类型,但是在此之前,首先阐述一下关于我们的测试工具你可能会问到的问题:

  •     生成高斯分布NURN的算法需要两个URN
  •     我们使用RN_MULTINOMIAL 函数来生成三种不同的分布类型,因为其比另外两种更加通用(会在文章后面详细解释)
  •     在创建函数RN_MULTINOMIAL之前,首先需要创建自定义表类型。自定义表类型是在SQL Server 2008引入的,因此在SQL Server 2005中你不能使用这种方式,另一种替代方式是使用XML
  •     我已经在文章附件中附加上了脚本,你首先运行Function.sql,然后运行NURNs.sql。生成100W的数据大概需要4.5分钟(译者注:在我的笔记本上跑了2.5分钟)

 

均匀分布随机数(Uniform Random Numbers) —–怎么个均匀法?

    因为接下来我们所有的数据分布都是基于均匀随机数之上,所以我们来看一下这些由标准SQL生成的随机数是怎么个均匀法。如果这些所谓的“均匀数”如果并不是那么”均匀”的话,那或多或少会对我们后面的结果产生影响。

    在{0,1}之间的URN概率很简单,就是0.5。对于这一区间的方差则为1/12。对于使用SQL SERVER内置的AVG()和VAR()函数来汇总我们生成的100W条数据。结果和我们期望的差不多。

 

 

PopulationMean PopulationVAR SampleMean SampleVAR
0.5 0.083333 0.499650832838227 0.0832923192890653

   

    下面的柱状图可以清晰的看到对于我们预定义区间的数据的分布:

    1

  

    正如我们所看到的结果,虽然结果并不是非常的“标准”但是已经足够对于我们这种并不需要那么精确的测试了。这里需要注意的是,我经过多次测试选择的上述结果,并没有特别的选取某个结果,如果你自己跑一遍SQL,你会发现你得到的结果也差不多。

 

多项式随机数

    在我们开始讨论多项分布之前,我们首先看一下其它两种类型的离散分布

    人们所熟知的抛硬币概率其实就是柏松分布。这种用来表示二选一(正面或者反面)发生的概率,返回0或者1(或是其它某个数)来表示事件是否发生(也可以理 解成“成功”或”失败”)。当然了,就抛硬币而言出现正面和反面概率是对等的,都是50%。但是柏松分布也允许其它非50%的概率,毕竟人生不总是那么公 平嘛。

    离散均匀分布则描述了多于2个事件,比如说仍骰子,每一面出现的概率都是相同的。当然,这也是和我们生成给定范围内随机整数的方法是类似的。Jeff Moden曾经在这里给出过描述。

    多项式随机数比上述两种分布类型还要宽泛,它模拟了在一系列事件中每一个单独事件发生的概率并不相同的情况。

    记住,我们的事件并不必要是一系列简单数字的集合,比如0,1,2,3。还可以是任何数字(包括负数)的集合.比如,在我们的多项式分布中我们选择了由Raphael M. Robinson在1952年发现的梅森尼质数,对于我们的离散均匀分布来说,我们采用了以1为开始的斐波纳契数列的前6个数。我们还可以通过修改用户自定义表类型Distribution的EventID列由INT改为FLOAT来将事件改为FLOAT类型。

    现在我们来看上面我们所设立用于测试的三个表变量,我们可以发现:

  •     @Binomial定义了两个事件(0,1),事件发生的概率P为50%,事件不发生的概率为1-p
  •     @DUniform定义了6个事件(1, 2, 3, 5, 8, 13),就像扔骰子一样,每一个事件的概率都是1/6
  •     @Multinomial定义了5个事件(521, 607, 1279, 2203, 2281),每一个事件发生的概率各不同

 

-- Multinomial NURN generator: maps a uniform random number @URN onto the
-- discrete event whose cumulative-probability bucket contains it.
-- @Multinomial: Distribution table type holding (EventID, EventProb, CumProb)
--               rows, CumProb ascending to 1.0.
-- Returns the selected EventID.
CREATE FUNCTION dbo.RN_MULTINOMIAL
    (@Multinomial Distribution READONLY, @URN FLOAT)
 RETURNS INT --Cannot use WITH SCHEMABINDING
AS
BEGIN
    RETURN
        ISNULL(
            (   SELECT TOP 1 EventID
                FROM @Multinomial
                WHERE @URN < CumProb
                ORDER BY CumProb)
            -- Handle unlikely case where URN = exactly 1.0
            ,(  SELECT MAX(EventID)
                FROM @Multinomial))
END

    对于我们测试的每一种分布,我们都进行了大量的测试结果后概率百分比如下表:

 

RandomBinomial     BinomialFreq     EventProb   ActPercentOfEvents

0                  499778           0.5         0.499778000000

1                  500222           0.5         0.500222000000

RandomDUniform     DUniformFreq     EventProb   ActPercentOfEvents

1                  166288           0.166666    0.166288000000

2                  166620           0.166666    0.166620000000

3                  166870           0.166666    0.166870000000

5                  166539           0.166666    0.166539000000

8                  166693           0.166666    0.166693000000

13                 166990           0.166666    0.166990000000

RandomMultinomial  MultinomialFreq  EventProb   ActPercentOfEvents

521                99719            0.1         0.099719000000

607                249706           0.25        0.249706000000

1279               300376           0.3         0.300376000000

2203               149633           0.15        0.149633000000

2281               200566           0.2         0.200566000000

 

通过上面的表格不难发现,这个概率和我们所期望的概率基本吻合。

 

高斯(或正态)分布随机数

    在最近的讨论中,SSC论坛的会员GPO发帖询问基于高斯(正态)分布生成随机数的问题。所以我开始研究这个问题(这其实也是本篇文章的灵感来源)并使用了博克斯·马勒变换方法,基于我的RN_GAUSSIAN函数,你可以忽视我在这个帖子中对于如何利用URN生成NURN的算法.

    高斯分布是一个连续分布,对于熟悉这种分布需要我们一点点解释,它经常采用平均数附近的“标准”样本进行分析。

     出了平均数之外,还必须指定标准偏差。下面的函数帮助我们认识能够帮我们了解示例总体数据正态分布的形状。

-- Normal (Gaussian) NURN via the Box-Muller transform.
-- @Mean / @StDev : target distribution parameters.
-- @URN1 / @URN2  : two independent uniform(0,1] random numbers.
-- Returns one normally distributed FLOAT.
CREATE FUNCTION dbo.RN_NORMAL
    (@Mean FLOAT, @StDev FLOAT, @URN1 FLOAT, @URN2 FLOAT)
 RETURNS FLOAT WITH SCHEMABINDING
AS
BEGIN
    -- Box-Muller: Z = sqrt(-2 ln U1) * cos(2*pi*U2); scale by @StDev, shift by @Mean.
    -- PI() yields the same double as ACOS(-1.).
    DECLARE @scaled FLOAT
    SET @scaled = @StDev * SQRT(-2 * LOG(@URN1))
    RETURN (@scaled * COS(2 * PI() * @URN2)) + @Mean
END

 

    首先我们先来比较总体(期望)平均数和方差与示例平均数和方差的比较来看这两者之间是否接近。

PopulationMean   PopulationSTDEV   SampleMean         SampleSTDEV

5                1.5               5.00049965700704   1.50020497041145

  然后,我们再来看图表。图中的间隔是加上或者减去3个平均数的标准差.

  2

 

     读者如果熟悉”正态”分布就会体会到这张图是多么的”标准(正态)”,我们还认识到1,000,000中有998,596(99.86%)在我们的3个平均数的标准差之内。这也是我们所期望的结果。

 

指数随机数

    指数随机分布是可以用CDF(累计分布函数)进行分布,并且可以很容易的用接近的表达式表达出来的分布。指数随机被应用于物理学,水理学,可靠性,等待时间等候论领域。

-- Exponential NURN via inverse-CDF sampling: X = -ln(U) / lambda.
-- @Lambda : rate parameter of the target distribution.
-- @URN    : uniform(0,1] random number.
CREATE FUNCTION dbo.RN_EXPONENTIAL (@Lambda FLOAT, @URN FLOAT)
 RETURNS FLOAT WITH SCHEMABINDING
AS
BEGIN
    DECLARE @lnU FLOAT
    SET @lnU = LOG(@URN)
    RETURN -@lnU / @Lambda
END

    首先要知道总体平均数是1/Lambda,标准方差是1/Lambda的平方,我们可以看到我们的总体平均数和方差和示例数据十分接近。

   PopulationMean   PopulationVAR   SampleMean   SampleVAR

   0.6667           0.4444          0.666        0.4444

 

  我们可以看由维基百科上提供的概率密度曲线,当Lambda取值为1.5时(蓝色的线)和我们的数据比较非常相似。

    3

 

韦伯分布随机数

    在大学中对韦伯分布已经小有研究,按照我的理解韦伯分布也是十分规律的,所以非常适合我们在这里生成非均匀随机数(NURN).韦伯分布在很多统计和工程领域都有应用,包括天气预报,保险,水理学,存活分析,可靠性工程(我教这门课的大学教授一定会为我骄傲的)以及其它领域。

    生成韦伯分布的公式我们可以在维基百科找到。而在我们这里实现这个方式的RN_WEIBULL函数实现起来也非常简单。两个参数分别为形状和尺度参数(@WeibullAlpha,@WeibullBeta)

-- Weibull NURN via inverse-CDF sampling: X = (-ln(1-U)/alpha)^(1/beta).
-- @Alpha : scale-related parameter; @Beta : shape parameter.
-- @URN   : uniform [0,1) random number.
CREATE FUNCTION dbo.RN_WEIBULL (@Alpha FLOAT, @Beta FLOAT, @URN FLOAT)
 RETURNS FLOAT WITH SCHEMABINDING
AS
BEGIN
    DECLARE @base FLOAT
    SET @base = (-1. / @Alpha) * LOG(1. - @URN)
    RETURN POWER(@base, 1. / @Beta)
END

   韦伯分布的并不是那么容易计算,因为表达式使用了伽马分布。下面是我们使用了形状参数为1.0尺度参数为1.5的图形,并与维基百科提供的图形进行了对比。

    4

 

拉普拉斯随机数

    或许是因为我是一个geek,还是由于我们大学教这门课的老师非常牛逼。我非常喜欢拉普拉斯变换这门课。当我知道拉普拉斯还发明了拉普拉斯统分布时,我将这种分布加入到本文中来表达对拉普拉斯的敬意。

    拉普拉斯分布是一种连续分布,所幸的是,它的累计分布函数(CDF)非常简单,在我们的函数中一个是位置参数,一个是尺度参数。

-- Laplace NURN via the inverse CDF:
--   X = mu - b * sgn(U - 0.5) * ln(1 - 2|U - 0.5|)
-- @u : location (mu); @b : scale; @URN : uniform (0,1) random number.
CREATE FUNCTION dbo.RN_LAPLACE (@u FLOAT, @b FLOAT, @URN FLOAT)
 RETURNS FLOAT WITH SCHEMABINDING
AS
BEGIN
    DECLARE @centered FLOAT
    SET @centered = @URN - 0.5
    -- SIGN() replaces the original three-branch CASE (+1 / -1 / 0).
    RETURN @u - @b * LOG(1 - 2 * ABS(@centered)) * SIGN(@centered)
END

    拉普拉斯分布有着很容易计算的平均数(@u)和标准方差(2*@b^2)所以我们再一次将我们的总体样本数据和示例数据进行比较。

   PopulationMean   PopulationVAR   SampleMean   SampleVAR

   4                2               4.0009       1.9975

  

  我们再一次使用我们的数据和维基百科提供的数据分布图进行比较。在这里我们取@b=4(图中红线)

  5

   

总结

    从本文的研究中我们得出结论,生成的非均匀随机数基本是正确的,起码和维基百科提供的数据比来说是正确的。此外我们还发现总体平均数和方差的对应关系。我们所有示例数据都在附件的EXCEL中。

    在自然界,人类和其它领域,随机是无所不在的。而工具模拟了这些随机性帮助我们在这个混乱的世界中找到规律。
    “创新其实就是在充满不确定的自然界中引入规律”—- Eric Hoffer

    科学和工程的研究往往要使用随机数来模拟自然界的现象。我希望我们的研究可以对这些领域的人有所帮助。

    对于那些好奇为什么我们不用柏松分布来产生非均匀随机数的人。因为在SQL的内置函数中不允许我们生成多个随机数(不使用NEWID() 和 RAND()是因为它们有“副作用”),我们将继续寻找在SQL Server中生成随机数更好的办法。
    本文阐述了在统计学中普遍存在的Alpha, Beta, Gamma和F分布。但是在生成非均匀随机数背后的数学原理更加复杂,所以有兴趣的高端读者可以自行查找资料。
    我希望感谢那些勇猛无畏把本文读完的读者,尤其是那些一路跟着思考的读者。谢谢它们对于非均匀随机数的兴趣和我们对于这个领域能提供更有用的例子。

参考:http://www.cnblogs.com/CareySon/archive/2012/07/11/GenerateNURNsUsingSQLServer.html


Warning: Undefined array key "HTTP_REFERER" in /www/wwwroot/prod/www.enjoyasp.net/wp-content/plugins/google-highlight/google-hilite.php on line 58

/* Import multiple Excel files into the database
 1. Use xp_cmdshell to list all Excel file names
 2. Because the sheet must be specified, fetch the first sheet name to import from each file
 3. Use OpenDataSource to load the Excel data
*/

-- Folder containing the Excel files
DECLARE @filepath NVARCHAR(1000)
SET @filepath = 'D:\nocode\liaoxiaoman\DDS\'

-- Collect every Excel file name
DROP TABLE duifang_file_dds
CREATE TABLE duifang_file_dds([filename] NVARCHAR(1000))

DECLARE @cmd VARCHAR(1000)
SET @cmd = N'dir "' + @filepath + '\*' + '*.xls" /B'
INSERT INTO duifang_file_dds
EXEC master..xp_cmdshell @cmd

-- Remove the NULL rows emitted by xp_cmdshell
DELETE a
FROM duifang_file_dds a
WHERE filename IS NULL

-- Add sheetname and ID columns to make debugging easier
ALTER TABLE duifang_file_dds ADD sheetname NVARCHAR(50),ID INT IDENTITY(1,1)

-- Extract the sheet name; ideally normalise every workbook's sheet name with a tool first
UPDATE a
--SET a.sheetname = replace(replace(replace(replace(dbo.SUBSTRING2(FILENAME,'绿瘦商贸','第',3),'第',''),'明细',''),'(',''),'(','')
SET a.sheetname = replace(replace(dbo.SUBSTRING2(FILENAME,'','第',3),'(',''),'(','')
FROM duifang_file_dds a

-- Normalise the sheet name:
-- the import rejects dots, so replace '.' with '#'
UPDATE a
SET a.sheetname = replace(replace(sheetname,'.','#'),' ','') + '$' -- sheet names end with $
FROM duifang_file_dds a

-- Sheet names starting with a digit must be wrapped in quotes
UPDATE a
SET a.sheetname = CASE(ISNUMERIC(SUBSTRING(sheetname,1,1))) WHEN 1 THEN QUOTENAME(a.sheetname,'''') ELSE a.sheetname END 
FROM duifang_file_dds a


SELECT *
FROM duifang_file_dds a


-- Create the destination table (WHERE 1=2 copies the structure without rows)
DROP TABLE duifang_bill_dds
SELECT   代收款,F2,F3,F4,F5, REPLICATE('',100) FileName INTO   duifang_bill_dds 
FROM   OpenDataSource( 'Microsoft.Jet.OLEDB.4.0', 
'Data Source= "D:\nocode\liaoxiaoman\DDS\5.21第三批.xls";
Extended Properties= "Excel 5.0;HDR=no;IMEX=1;Excel 8.0";Persist Security Info=False ')...['5#21$']
WHERE 1=2



TRUNCATE TABLE duifang_bill_dds

DECLARE pcur CURSOR FOR
SELECT  filename,sheetname,ID
FROM duifang_file_dds
WHERE ID >=1
ORDER BY ID

DECLARE @filename NVARCHAR(1000), @sheetname NVARCHAR(50),@s NVARCHAR(1000),@ID INT

OPEN pcur
FETCH NEXT FROM pcur INTO @filename,@sheetname,@ID
WHILE @@FETCH_STATUS = 0
BEGIN 
	-- Convert 代收款 through BIGINT to VARCHAR(50) so parcel numbers are not shown in e+ notation
	SET @S = 'INSERT duifang_bill_dds  
	SELECT CONVERT(VARCHAR(50),convert(bigint,代收款)),F2,F3,F4,F5, ''' + @filename + ''' FROM OpenDataSource(''Microsoft.Jet.OLEDB.4.0'', 
	''Data Source= "' + @filepath + @filename + '";
	Extended Properties= "Excel 5.0;HDR=no;IMEX=1;Excel 8.0";Persist Security Info=False '')...[' + @sheetname + ']
	WHERE ISNUMERIC(代收款) = 1'
	PRINT 'ID=' + CONVERT(VARCHAR(50),@ID) + CHAR(13) +@S 
	EXEC (@s)
	FETCH NEXT FROM pcur INTO @filename,@sheetname,@ID	
END 
CLOSE pcur
DEALLOCATE pcur

-- Which files failed to import?
SELECT *
FROM duifang_file_dds a
WHERE  NOT EXISTS( SELECT 1 FROM duifang_bill_dds b WHERE a.FILENAME = b.filename )








Warning: Undefined array key "HTTP_REFERER" in /www/wwwroot/prod/www.enjoyasp.net/wp-content/plugins/google-highlight/google-hilite.php on line 58
/*
Merge-replication distribution runs as SQL Agent jobs; controlling
distribution therefore means starting/stopping those jobs.

-- Start all merge-replication synchronisation jobs
EXEC ReplControl @start = 1
-- Stop all synchronisation jobs
EXEC ReplControl @start = 0
*/


CREATE PROCEDURE ReplControl
@start INT -- start flag: 0 = stop, 1 = start
AS 
BEGIN 
	DECLARE @execution_status INT,@sql VARCHAR(1000)
	
	/* When stopping, look for running jobs; when starting, look for idle jobs.
	@execution_status:
	1 = executing
	4 = idle
	*/
	SET @execution_status = CASE(@start) WHEN 0 THEN 1 ELSE 4 END 
	SET @sql = CASE(@start) WHEN 0 THEN 'msdb.dbo.sp_stop_job @job_id = ' 
							ELSE 'msdb.dbo.sp_start_job @job_id = ' END 

	-- Layout matches the result set of xp_sqlagent_enum_jobs
	CREATE TABLE #enum_job ( 
	Job_ID uniqueidentifier, 
	Last_Run_Date int, 
	Last_Run_Time int, 
	Next_Run_Date int, 
	Next_Run_Time int, 
	Next_Run_Schedule_ID int, 
	Requested_To_Run int, 
	Request_Source int, 
	Request_Source_ID varchar(100), 
	Running int, 
	Current_Step int, 
	Current_Retry_Attempt int, 
	State int 
	)       
	insert into #enum_job 
	exec master.dbo.xp_sqlagent_enum_jobs 1,hello  
		 /*p_sqlagent_enum_jobs <is sysadmin (0 or 1)>, 
						  <job owner name> 
						  [, <job id>]

	The first parameter identifies whether you want to return information about all jobs on the server, or just jobs owned by a particular job owner. If you specify "0" for this first parameter, it means you want to return job information for a particular job owner. If you specify a "1," it means you want information for all jobs. The second parameter identifies the job owner. This parameter is required on all calls to this XP but is only used when you specify "0" for the first parameter. The third and last parameter only needs to be provided if you want to return information about a particular job_id. */

	-- Merge-replication jobs in the requested state
	DECLARE @Job_ID uniqueidentifier
	DECLARE @item_table TABLE (Job_ID uniqueidentifier)
	INSERT INTO @item_table(Job_ID)
	SELECT A.Job_ID
	FROM #enum_job a 
	JOIN msdb.dbo.sysjobs j ON a.Job_ID = j.job_id
	JOIN msdb.dbo.syscategories c ON j.category_id = c.category_id
	WHERE c.name = 'REPL-Merge' AND a.State = @execution_status

	-- Cursor-free loop: start/stop one job at a time, removing each
	-- processed row from the table variable
	DECLARE @tmpSql VARCHAR(1000)
	WHILE (SELECT COUNT(1) FROM @item_table) > 0
	BEGIN
		SELECT TOP 1 @Job_ID = Job_ID
		FROM @item_table
		SET @tmpSql = @Sql + QUOTENAME(@Job_ID,'''')
		EXEC ( @tmpSql )
		--PRINT @Job_ID

		DELETE FROM @item_table 
		WHERE @Job_ID = Job_ID
	END
	DROP TABLE #enum_job
END 




Warning: Undefined array key "HTTP_REFERER" in /www/wwwroot/prod/www.enjoyasp.net/wp-content/plugins/google-highlight/google-hilite.php on line 58

使用游标的最好方式是不用游标。
Why is a cursor slow? It acquires locks, which a table variable does not — and that is why cursors are slow.

游标替代方案:

-- Cursor-free row-by-row processing using a table variable:
-- pick one row, process it, delete it, repeat until empty.
--Declare variables
DECLARE @item_category_id INT
Declare @X BIGINT
Set @X = 0
--Declare a memory table
DECLARE @item_table TABLE (item_category_id INT)

INSERT INTO @item_table(item_category_id)
SELECT  object_id
From master.sys.objects c1(NOLOCK)

--WHILE @loop_counter > 0 AND @item_category_counter <= @loop_counter
WHILE (SELECT COUNT(1) FROM @item_table) > 0
BEGIN
    SELECT TOP 1 @item_category_id = item_category_id
    FROM @item_table 
    
    -- Process the current row (here: accumulate the id)
    Set @X = @X + @item_category_id
    
   delete from @item_table where item_category_id = @item_category_id
END
PRINT @X




Warning: Undefined array key "HTTP_REFERER" in /www/wwwroot/prod/www.enjoyasp.net/wp-content/plugins/google-highlight/google-hilite.php on line 58

在合并复制中,sqlserver会不断的同步架构,加上sch-m锁,即使架构没有更改,这就与业务系统查询所用的sch-s锁冲突。为解决此问题,可暂停架构的同步,不过引起的另一问题是,更新的存储过程等不会自动分发下去了。变通之道:在同步之前开启架构复制同步,同步完成后关闭。

-- Enable schema-change (DDL) replication for the publication
EXEC sp_changemergepublication   @publication = 'BRM-NewCOPY',   @property = N'replicate_ddl', @value = 1;
GO

-- ...update the stored procedures here...

-- Disable schema-change replication again
EXEC sp_changemergepublication   @publication = 'BRM-NewCOPY',   @property = N'replicate_ddl', @value = 0;
GO

参考:http://msdn.microsoft.com/en-us/library/ms152562%28v=sql.90%29.aspx

By default, the following schema changes made at a Publisher running SQL Server 2005 are replicated to all SQL Server Subscribers:

  • ALTER TABLE
  • ALTER VIEW
  • ALTER PROCEDURE
  • ALTER FUNCTION
  • ALTER TRIGGER


Warning: Undefined array key "HTTP_REFERER" in /www/wwwroot/prod/www.enjoyasp.net/wp-content/plugins/google-highlight/google-hilite.php on line 58

尽管 IDENTITY 属性在一个表内自动进行行编号,但具有各自标识符列的各个表可以生成相同的值。这是因为 IDENTITY 属性仅在使用它的表上保证是唯一的。如果应用程序必须生成在整个数据库或世界各地所有网络计算机的所有数据库中均为唯一的标识符列,请使用 ROWGUIDCOL 属性、uniqueidentifier 数据类型和 NEWID 函数。

使用 ROWGUIDCOL 属性定义 GUID 列时,请注意下列几点:

  • 一个表只能有一个 ROWGUIDCOL 列,且必须通过使用 uniqueidentifier 数据类型定义该列。
     
  • 数据库引擎 不会自动为该列生成值。若要插入全局唯一值,请为该列创建 DEFAULT 定义来使用 NEWID 函数生成全局唯一值。
     
  • 在设置 ROWGUIDCOL 属性后,通过使用 $ROWGUID 关键字可以在选择列表中引用该列。这与通过使用 $IDENTITY 关键字可以引用 IDENTITY 列的方法类似。
     
  • OBJECTPROPERTY 函数可用于确定一个表是否具有 ROWGUIDCOL 列,COLUMNPROPERTY 函数可用于确定 ROWGUIDCOL 列的名称。
     
  • 由于 ROWGUIDCOL 属性不强制唯一性,因此应使用 UNIQUE 约束来保证插入到 ROWGUIDCOL 列中的值是唯一的。
     

在创建表时创建新的标识符列


Warning: Undefined array key "HTTP_REFERER" in /www/wwwroot/prod/www.enjoyasp.net/wp-content/plugins/google-highlight/google-hilite.php on line 58

SQL Server selects the deadlock victim based on the following criteria:
1. Deadlock priority – the assigned DEADLOCK_PRIORITY of a given session determines
the relative importance of it completing its transactions, if that session is
involved in a deadlock. The session with the lowest priority will always be chosen as
the deadlock victim. Deadlock priority is covered in more detail later in this chapter.
2. Rollback cost – if two or more sessions involved in a deadlock have the same
deadlock priority, then SQL Server will choose as the deadlock victim the session that
has lowest estimated cost to roll back.
3. When a deadlock occurs, SQL Server selects one of the participants as the victim, cancels that spid's current batch, and rolls back its transaction so the other spids can continue their work. The deadlock victim gets a 1205 error:
Transaction (Process ID 52) was deadlocked on lock resources with another process and has been chosen as the deadlock victim. Rerun the transaction.

There are circumstances (for example, a critical report that performs a long running
SELECT that must complete even if it is the ideal deadlock victim) where it may be
preferable to specify which process will be chosen as the deadlock victim in the event
of a deadlock, rather than let SQL Server decide based purely on the cost of rollback. As
demonstrated in Listing 7.18, SQL Server offers the ability to set, at the session or batch
level, a deadlock priority using the SET DEADLOCK PRIORITY option.

-- Set a Low deadlock priority
SET DEADLOCK_PRIORITY LOW ;
GO
-- Set a High deadlock priority
SET DEADLOCK_PRIORITY HIGH ;
GO
-- Set a numeric deadlock priority (valid range: -10 to 10)
SET DEADLOCK_PRIORITY 2 ;

SQL Server 2005 and 2008 however, have three named deadlock priorities; Low, Normal, and High, as well
as a numeric range from -10 to +10, for fine-tuning the deadlock priority of different
operations.
 


Warning: Undefined array key "HTTP_REFERER" in /www/wwwroot/prod/www.enjoyasp.net/wp-content/plugins/google-highlight/google-hilite.php on line 58

1,RAID 0, most commonly known as striping, provides improved I/O rates (in terms of IOPS) by striping the data across multiple drives, allowing the read and write operations to be shared amongst the drives inside the array. This level of RAID provides the best performance for both read and write operations, but provides no redundancy or protection against data loss. In the event of a single disk failure in the array, all of the data is lost.
one task A, writing a+b, for RAID 0: disk 1: a, disk 2: b, so:
n disks: n(read+write)

2,RAID 1, most commonly known as mirroring, provides protection against the loss of
data from a single disk by mirroring the writes to a second disk but doesn't provide added write performanceto the system, since the maximum throughput for the array is limited to the I/O capacity
of a single disk
one task A, writing a+b, for RAID 1: write: disk 1: a+b, disk 2: a+b, so:
n disks: (n read + n/2 write)

3,RAID 10
RAID 10, or RAID 1+0, is a nested RAID level known as a "striped pair of mirrors." It
provides redundancy by first mirroring each disk, using RAID 1, and then striping those
mirrored disks, with RAID 0, to improve performance.
one task A, writing a+b, for RAID 10: write: disk 1: a, disk 1 (mirror): a, disk 2: b, disk 2 (mirror): b
so:
n disks: (n read + n/2 write)

4,RAID 01
RAID 01 or RAID 0+1, is a nested RAID level also known as "mirrored pairs of striped
disks." In a RAID 0+1 configuration, the nested levels are the opposite of the RAID 1+0,
with the disks first being striped in a RAID 0 and then mirrored using RAID 1. However,
this type of configuration only allows for a single disk loss from one side of the array,
since a single disk failure in a RAID 0 array causes that array to fail. A loss of a single disk
in both of the RAID 0 striped arrays would result in total data loss.

5,RAID 5 (and 6)
RAID 5 is commonly known as "striping with parity;" the data is striped across multiples
disks, as per RAID 0, but parity data is stored in order to provide protection from single
disk failure. The minimum number of disks required for a RAID 5 array is three. The
parity blocks containing the parity data are staggered across the stripes inside the array
n disks: (n-1) * (read + write)

Raid 0 –每个磁盘的I/O计算= (读+写) /磁盘个数

  Raid 1 –每个磁盘的I/O计算= [读+(2*写)]/2

  Raid 5 –每个磁盘的I/O计算= [读+(4*写)]/磁盘个数

  Raid 10 –每个磁盘的I/O计算= [读+(2*写)]/磁盘个数

由上知,raid5与raid10比较,raid5对单块磁盘的I/O要求较高,常采用raid10
虽然raid5奇偶性是实现容错的一种不错的方式。但是从磁盘写入来说代价高昂。也就是说对于每一个IOP写请求,RAID5需要4个IOPS。

    为什么需要这么高写入代价的过程如下:

  •     读取原始数据(1 iop)
  •     读取当前奇偶数据(1 iop)
  •     比较当前数据和新写入请求
  •     基于数据差异计算新的奇偶值
  •     写入新数据(1 iop)
  •     写入新的奇偶值(1 iop)


6,choose what?
Data files:
 1, The choice depends heavily on the read-to-write ratio of the workload. SQL Server tracks the I/O usage of the database files for an instance and makes this information available in the sys.dm_io_virtual_
file_stats Dynamic Management Function. This information can be used
to determine whether your overall workload is heavier for reads vs. writes.
For a database that is primarily read-only, RAID 5 or RAID 6 can offer good read performance.
2,RAID 5 or 6 arrays are commonly used for data warehouses, or for storing data where write latency doesn't impact overall system performance.
3,For OLTP implementations of heavy-write databases, RAID 1+0 provides the best performance

Log files:
    Since the transaction log is written to sequentially, RAID 1 can be used in most situations.the transaction log for each database should be located on dedicated physical disks

tempdb:As a general rule, the tempdb database files should be physically separate from the user
data files and transaction log files, on a dedicated disk array. Since tempdb is a writeheavy
database, RAID 1 or RAID 1+0 are usually the configurations best able to support
the concurrent workload of tempdb.since tempdb is used for temporary storage only, Solid State Disks and even RamDisks can be used to significantly improve the I/O characteristics of the tempdb database

 

http://blog.tianya.cn/blogger/post_show.asp?idWriter=0&Key=0&BlogID=854352&PostID=20759437


Warning: Undefined array key "HTTP_REFERER" in /www/wwwroot/prod/www.enjoyasp.net/wp-content/plugins/google-highlight/google-hilite.php on line 58

显示当前打开的状态
-- Show which trace flags are currently enabled (add -1 to list global flags)
DBCC TRACESTATUS();

-- Enable trace flag 3205 for the current connection only
DBCC TRACEON (3205);
-- Enable trace flag 1222 (deadlock report to the error log) globally (-1).
-- FIX: the original read DBCC TRACEON (3205-1), which neither enables 1222
-- nor applies globally, contradicting the note above it.
DBCC TRACEON (1222, -1);


Warning: Undefined array key "HTTP_REFERER" in /www/wwwroot/prod/www.enjoyasp.net/wp-content/plugins/google-highlight/google-hilite.php on line 58
When looking at the wait statistics being tracked by SQL Server, it's important that these
wait types are eliminated from the analysis, allowing the more problematic waits in the
system to be identified. One of the things I do as a part of tracking wait information is to
maintain a script that filters out the non-problematic wait types, as shown in Listing 1.2.

Troubleshooting SQL Server
A Guide for the Accidental DBA

--1,wait types,if for i/o,then sys.dm_io_virtual_file_stats
--2,sqlserver perfmon counts view

-- Top 10 accumulated waits since instance start / last reset, excluding
-- benign background wait types (based on Listing 1.2 of "Troubleshooting
-- SQL Server"). resource wait = total wait - signal (CPU queue) wait.
-- FIX: the original was missing the comma after max_wait_time_ms, which
-- silently aliased it AS wait_time_ms; duplicate entries in the NOT IN
-- list ('FT_IFTSHC_MUTEX', 'LAZYWRITER_SLEEP') are removed.
SELECT TOP 10
    wait_type ,
    max_wait_time_ms ,
    wait_time_ms ,
    signal_wait_time_ms ,
    wait_time_ms - signal_wait_time_ms AS resource_wait_time_ms ,
    100.0 * wait_time_ms / SUM(wait_time_ms) OVER ( )
        AS percent_total_waits ,
    100.0 * signal_wait_time_ms / NULLIF(SUM(signal_wait_time_ms) OVER ( ), 0)
        AS percent_total_signal_waits ,
    100.0 * ( wait_time_ms - signal_wait_time_ms )
        / SUM(wait_time_ms) OVER ( ) AS percent_total_resource_waits
FROM sys.dm_os_wait_stats
WHERE wait_time_ms > 0          -- remove zero wait_time rows
  AND wait_type NOT IN          -- filter out non-problematic waits
    ( 'SLEEP_TASK', 'BROKER_TASK_STOP', 'BROKER_TO_FLUSH',
      'SQLTRACE_BUFFER_FLUSH', 'CLR_AUTO_EVENT', 'CLR_MANUAL_EVENT',
      'LAZYWRITER_SLEEP', 'SLEEP_SYSTEMTASK', 'SLEEP_BPOOL_FLUSH',
      'BROKER_EVENTHANDLER', 'XE_DISPATCHER_WAIT', 'FT_IFTSHC_MUTEX',
      'CHECKPOINT_QUEUE', 'FT_IFTS_SCHEDULER_IDLE_WAIT',
      'BROKER_TRANSMITTER', 'KSOURCE_WAKEUP',
      'LOGMGR_QUEUE', 'ONDEMAND_TASK_QUEUE',
      'REQUEST_FOR_DEADLOCK_SEARCH', 'XE_TIMER_EVENT', 'BAD_PAGE_PROCESS',
      'DBMIRROR_EVENTS_QUEUE', 'BROKER_RECEIVE_WAITFOR',
      'PREEMPTIVE_OS_GETPROCADDRESS', 'PREEMPTIVE_OS_AUTHENTICATIONOPS',
      'WAITFOR', 'DISPATCHER_QUEUE_SEMAPHORE', 'XE_DISPATCHER_JOIN',
      'RESOURCE_QUEUE' )
ORDER BY wait_time_ms DESC


CXPACKET
Often indicates nothing more than that certain queries are executing with parallelism;
CXPACKET waits in the server are not an immediate sign of problems

SOS_SCHEDULER_YIELD
This may indicate that the server is under CPU pressure.

THREADPOOL
requiring an increase in the number of
CPUs in the server, to handle a highly concurrent workload, or it can be a sign of
blocking


LCK_*
These wait types signify that blocking is occurring in the system and that sessions
have had to wait to acquire a lock of a specific type.
This problem can be investigated further using the information
in the sys.dm_db_index_operational_stats


PAGEIOLATCH_*, IO_COMPLETION, WRITELOG
These waits are commonly associated with disk I/O bottlenecks, though the root
cause of the problem may be,PAGEIOLATCH_* waits are
specifically associated with delays in being able to read or write data from the database
files. WRITELOG waits are related to issues with writing to log files.
These waits
should be evaluated in conjunction with the virtual file statistics as well as Physical
Disk performance counters


PAGELATCH_*
A lot of times
PAGELATCH_* waits are associated with allocation contention issues. One of
the best-known allocations issues associated with PAGELATCH_* waits occurs in
tempdb when a large number of objects are being created and destroyed in
tempdb and the system experiences contention on the Shared Global Allocation
Map (SGAM), Global Allocation Map (GAM), and Page Free Space (PFS) pages in the
tempdb database.




LATCH_*
Determining the
specific latch class that has the most accumulated wait time associated with it can
be found by querying the sys.dm_os_latch_stats DMV.

ASYNC_NETWORK_IO
This wait is often incorrectly attributed to a network bottleneck,In fact, the most
common cause of this wait is a client application that is performing row-by-row
processing of the data being streamed from SQL Server as a result set (client accepts
one row, processes, accepts next row, and so on). Correcting this wait type generally
requires changing the client-side code so that it reads the result set as fast as possible,
and then performs processing



After fixing any problem in the server, in order to validate that the problem has indeed
been fixed, the wait statistics being tracked by the server can be reset using the code in
Listing 1.3.

DBCC SQLPERF('sys.dm_os_wait_stats', clear)


--------------disk I/O bottleneck.-----------
will provide cumulative physical I/O statistics, the number of reads
and writes on each data file, and the number of reads and writes on each log file, for
the various databases in the instance, from which can be calculated the ratio of reads to
writes. This also shows the number of I/O stalls and the stall time associated with the
requests, which is the total amount of time sessions have waited for I/O to be completed
on the file.
whether heavy-read or heavy-write, and at the average latency associated with the I/O, as this will
direct further investigation and possible solutions.


-- Cumulative per-file I/O statistics since instance start: read/write counts,
-- bytes, and I/O stalls for every data and log file, joined to
-- sys.master_files for the physical path. Ordered by average total latency
-- to surface the slowest files first.
-- NOTE(review): the avg_* columns use integer division, so latencies and
-- byte averages are truncated to whole units -- acceptable for triage.
SELECT DB_NAME(vfs.database_id) AS database_name ,

vfs.database_id ,
vfs.FILE_ID ,
io_stall_read_ms / NULLIF(num_of_reads, 0) AS avg_read_latency ,
io_stall_write_ms / NULLIF(num_of_writes, 0)
AS avg_write_latency ,
io_stall / NULLIF(num_of_reads + num_of_writes, 0)
AS avg_total_latency ,
num_of_bytes_read / NULLIF(num_of_reads, 0)
AS avg_bytes_per_read ,
num_of_bytes_written / NULLIF(num_of_writes, 0)
AS avg_bytes_per_write ,
vfs.io_stall ,
vfs.num_of_reads ,
vfs.num_of_bytes_read ,
vfs.io_stall_read_ms ,
vfs.num_of_writes ,
vfs.num_of_bytes_written ,
vfs.io_stall_write_ms ,
size_on_disk_bytes / 1024 / 1024. AS size_on_disk_mbytes ,
physical_name
FROM sys.dm_io_virtual_file_stats(NULL, NULL) AS vfs
JOIN sys.master_files AS mf ON vfs.database_id = mf.database_id
AND vfs.FILE_ID = mf.FILE_ID
ORDER BY avg_total_latency DESC



-- Browse all counters the instance exposes through the DMV
SELECT * FROM sys.dm_os_performance_counters

-- Snapshot a set of key performance counters twice, one second apart, then
-- report either the 1-second delta (cumulative counters) or the latest raw
-- value (point-in-time counters).
-- Counter objects are prefixed 'SQLServer:' on a default instance and
-- 'MSSQL$<instance>:' on a named instance.
DECLARE @CounterPrefix NVARCHAR(30)
SET @CounterPrefix = CASE WHEN @@SERVICENAME = 'MSSQLSERVER'
THEN 'SQLServer:'
ELSE 'MSSQL$' + @@SERVICENAME + ':'
END ;
-- Capture the first counter set
SELECT CAST(1 AS INT) AS collection_instance ,
[OBJECT_NAME] ,
counter_name ,
instance_name ,
cntr_value ,
cntr_type ,
CURRENT_TIMESTAMP AS collection_time
INTO #perf_counters_init
FROM sys.dm_os_performance_counters
WHERE ( OBJECT_NAME = @CounterPrefix + 'Access Methods'
AND counter_name = 'Full Scans/sec'
)
OR ( OBJECT_NAME = @CounterPrefix + 'Access Methods'
AND counter_name = 'Index Searches/sec'
)
OR ( OBJECT_NAME = @CounterPrefix + 'Buffer Manager'
AND counter_name = 'Lazy Writes/sec'
)
OR ( OBJECT_NAME = @CounterPrefix + 'Buffer Manager'
AND counter_name = 'Page life expectancy'
)
OR ( OBJECT_NAME = @CounterPrefix + 'General Statistics'
AND counter_name = 'Processes Blocked'
)
OR ( OBJECT_NAME = @CounterPrefix + 'General Statistics'
AND counter_name = 'User Connections'
)
OR ( OBJECT_NAME = @CounterPrefix + 'Locks'
AND counter_name = 'Lock Waits/sec'
)
OR ( OBJECT_NAME = @CounterPrefix + 'Locks'
AND counter_name = 'Lock Wait Time (ms)'
)

OR ( OBJECT_NAME = @CounterPrefix + 'SQL Statistics'
AND counter_name = 'SQL Re-Compilations/sec'
)
OR ( OBJECT_NAME = @CounterPrefix + 'Memory Manager'
AND counter_name = 'Memory Grants Pending'
)
OR ( OBJECT_NAME = @CounterPrefix + 'SQL Statistics'
AND counter_name = 'Batch Requests/sec'
)
OR ( OBJECT_NAME = @CounterPrefix + 'SQL Statistics'
AND counter_name = 'SQL Compilations/sec'
)
-- Wait on Second between data collection
WAITFOR DELAY '00:00:01'
-- Capture the second counter set (same filter as the first snapshot)
SELECT CAST(2 AS INT) AS collection_instance ,
OBJECT_NAME ,
counter_name ,
instance_name ,
cntr_value ,
cntr_type ,
CURRENT_TIMESTAMP AS collection_time
INTO #perf_counters_second
FROM sys.dm_os_performance_counters
WHERE ( OBJECT_NAME = @CounterPrefix + 'Access Methods'
AND counter_name = 'Full Scans/sec'
)
OR ( OBJECT_NAME = @CounterPrefix + 'Access Methods'
AND counter_name = 'Index Searches/sec'
)
OR ( OBJECT_NAME = @CounterPrefix + 'Buffer Manager'
AND counter_name = 'Lazy Writes/sec'
)
OR ( OBJECT_NAME = @CounterPrefix + 'Buffer Manager'
AND counter_name = 'Page life expectancy'
)
OR ( OBJECT_NAME = @CounterPrefix + 'General Statistics'
AND counter_name = 'Processes Blocked'
)
OR ( OBJECT_NAME = @CounterPrefix + 'General Statistics'
AND counter_name = 'User Connections'
)
OR ( OBJECT_NAME = @CounterPrefix + 'Locks'
AND counter_name = 'Lock Waits/sec'
)
OR ( OBJECT_NAME = @CounterPrefix + 'Locks'
AND counter_name = 'Lock Wait Time (ms)'
)
OR ( OBJECT_NAME = @CounterPrefix + 'SQL Statistics'
AND counter_name = 'SQL Re-Compilations/sec'
)
OR ( OBJECT_NAME = @CounterPrefix + 'Memory Manager'
AND counter_name = 'Memory Grants Pending'
)
OR ( OBJECT_NAME = @CounterPrefix + 'SQL Statistics'
AND counter_name = 'Batch Requests/sec'
)
OR ( OBJECT_NAME = @CounterPrefix + 'SQL Statistics'
AND counter_name = 'SQL Compilations/sec'
)
-- Calculate the cumulative counter values.
-- cntr_type 272696576 (PERF_COUNTER_BULK_COUNT) is a cumulative per-second
-- counter: report the delta between the two samples; 65792
-- (PERF_COUNTER_LARGE_RAWCOUNT) is a point-in-time value: report the
-- second sample as-is.
SELECT i.OBJECT_NAME ,
i.counter_name ,
i.instance_name ,
CASE WHEN i.cntr_type = 272696576
THEN s.cntr_value - i.cntr_value
WHEN i.cntr_type = 65792 THEN s.cntr_value
END AS cntr_value
FROM #perf_counters_init AS i
JOIN #perf_counters_second AS s
ON i.collection_instance + 1 = s.collection_instance
AND i.OBJECT_NAME = s.OBJECT_NAME
AND i.counter_name = s.counter_name
AND i.instance_name = s.instance_name
ORDER BY OBJECT_NAME
-- Cleanup tables
DROP TABLE #perf_counters_init
DROP TABLE #perf_counters_second


The two Access Methods counters provide information about the ways that tables
are being accessed in the database. The most important one is the Full Scans/sec
counter, which can give us an idea of the number of index and table scans that are
occurring in the SYSTEM

In general, I want the number
of Index Searches/sec to be higher than the number of Full Scans/sec by a factor
of 800–1000. If the number of Full Scans/sec is too high, refer to Chapter 5, Missing
Indexes to determine if there are missing indexes in the database, resulting in excess
I/O operations.

Page Life Expectancy (PLE) which is the number of seconds a page will remain
in the data cache
A commonly used alert threshold is PLE <= (buffer pool memory in GB / 4) * 300 seconds. If PLE is below this threshold and is accompanied by a high rate of Lazy
Writes/sec, which are page flushes from the buffer cache outside of the normal CHECKPOINT
process, then the server is most likely experiencing data cache memory pressure,
which will also increase the disk I/O being performed by the SQL SERVER,At this point
the Access Methods counters should be investigated to determine if excessive table or
index scans are being performed on the SQL SERVER


The General Statistics\Processes Blocked, Locks\Lock Waits/sec,
If these counters return a value other
than zero, over repeated collections of the data, then blocking is actively occurring in one
of the databases , Blocking should be used to
troubleshoot the problems further
 sp_configure ('show advanced options')
 
 The higher the number of SQL Compilations/
sec in relation to the Batch Requests/sec, the more likely the SQL Server is
experiencing an ad hoc workload that is not making optimal using of plan caching. The
higher the number of SQL Re-Compilations/sec in relation to the Batch Requests/
sec, the more likely it is that there is an inefficiency in the code design that is forcing
a recompile of the code being executed in the SQL Server. In either case, investigation
of the Plan Cache
 show advanced options
 
 The Memory Manager\Memory Grants Pending performance counter provides
information about the number of processes waiting on a workspace memory grant in
the instance.If this counter has a high value,there may be query inefficiencies in the instance that are causing excessive
memory grant requirements, for example, large sorts or hashes that can be resolved by
tuning the indexing or queries being executed




Warning: Undefined array key "HTTP_REFERER" in /www/wwwroot/prod/www.enjoyasp.net/wp-content/plugins/google-highlight/google-hilite.php on line 58

1,打开跟踪,写入日志
DBCC TRACEON (3605, 1222, -1);  -- 3605: route output to the error log; 1222: deadlock report; -1: global
DBCC TRACEON (1222, -1);        -- or enable at startup with the -T1222 parameter
同时,可开profile中的Deadlock graph跟踪,以图形化
2,分析安装目录下生成的日志
   1)确定死锁的资源, 据对象不同查看方式不同。
-- 3604 routes DBCC output to the client so DBCC PAGE results are visible
DBCC TRACEON(3604)

-- DBCC PAGE(database_id, file_id, page_id, print_option);
-- here: database 8, file 1, page 96, option 1 (page header plus row dumps)
DBCC PAGE(8,1,96,1)
DBCC TRACEOFF(3604)

死锁产生的前提:双方互占有了对方所需求的资源,若资源并不必要,可过滤掉。
资源不必要:1)扫描了对方的资源,扫描过的就会加锁,避免被扫描到,如加索引等。
3,死锁处理方法:
 1)从性能出发,优化sql
 2)从业务逻辑出发,看是否可去掉对死锁资料的关联。
 3)若还是不能解决,死锁是正常情况,避免不了,但可避免输出1205错误信息给客户端,方法就是加上try catch,可以等一会儿再重新执行。
如:

/*避免死锁显示给客户端 */
/* Retry wrapper that hides deadlock error 1205 from the client: on deadlock,
   roll back, pause briefly, and retry up to 4 times. Any other error is
   rolled back and re-raised to the caller. */
DECLARE @retries INT ;
SET @retries = 4 ;
WHILE ( @retries > 0 )
    BEGIN
        BEGIN TRY
            BEGIN TRANSACTION ;
            -- place sql code here
            SET @retries = 0 ;          -- success: leave the loop
            COMMIT TRANSACTION ;
        END TRY
        BEGIN CATCH
            -- Roll back first so a retry (or the re-raise) starts clean
            IF XACT_STATE() <> 0
                ROLLBACK TRANSACTION ;
            IF ( ERROR_NUMBER() = 1205 )        -- chosen as deadlock victim
                BEGIN
                    SET @retries = @retries - 1 ;
                    -- brief back-off before retrying the deadlocked work
                    WAITFOR DELAY '00:00:00.200' ;
                END
            ELSE
                -- Error is not a deadlock: re-raise it and stop retrying
                BEGIN
                    DECLARE @ErrorMessage NVARCHAR(4000) ;
                    DECLARE @ErrorSeverity INT ;
                    DECLARE @ErrorState INT ;
                    SELECT  @ErrorMessage = ERROR_MESSAGE() ,
                            @ErrorSeverity = ERROR_SEVERITY() ,
                            @ErrorState = ERROR_STATE() ;
                    -- FIX: pass the message through %s so any literal '%'
                    -- characters in it are not parsed as format specifiers
                    RAISERROR ('%s', -- format
                    @ErrorSeverity, -- Severity.
                    @ErrorState , -- State.
                    @ErrorMessage -- Message text.
                    ) ;
                    SET @retries = 0 ;
                END
        END CATCH ;
    END ;
GO

/*死锁模拟
1,建立数据
2,连续两个事务
*/
-- Build the demo table for the deadlock simulation: a heap (the PK is
-- NONclustered) plus two nonclustered indexes, loaded from AdventureWorks'
-- HumanResources.Employee.
-- FIX: guard the DROP so the script is rerunnable on a fresh database, and
-- give the INSERT an explicit column list so it does not break silently if
-- the table or source schema changes.
IF OBJECT_ID('dbo.Employee_Demo_Heap', 'U') IS NOT NULL
    DROP TABLE Employee_Demo_Heap
go
SET ANSI_NULLS ON
GO
SET QUOTED_IDENTIFIER ON
GO
CREATE TABLE [Employee_Demo_Heap](
	[EmployeeID] [int] NOT NULL,
	[NationalIDNumber] [nvarchar](15) NOT NULL,
	[ContactID] [int] NOT NULL,
	[LoginID] [nvarchar](256) NOT NULL,
	[ManagerID] [int] NULL,
	[Title] [nvarchar](50) NOT NULL,
	[BirthDate] [datetime] NOT NULL,
	[MaritalStatus] [nchar](1) NOT NULL,
	[Gender] [nchar](1) NOT NULL,
	[HireDate] [datetime] NOT NULL,
	[ModifiedDate] [datetime] NOT NULL   DEFAULT (getdate()),
 CONSTRAINT [PK_Employee_EmployeeID_Demo_Heap] PRIMARY KEY nonCLUSTERED
(
	[EmployeeID] ASC
)WITH (PAD_INDEX  = OFF, STATISTICS_NORECOMPUTE  = OFF,
IGNORE_DUP_KEY = OFF, ALLOW_ROW_LOCKS  = ON, ALLOW_PAGE_LOCKS  = ON) ON [PRIMARY]
) ON [PRIMARY]
GO

CREATE NONCLUSTERED INDEX [IX_Employee_ManagerID_Demo_Heap] ON [Employee_Demo_Heap]
(
	[ManagerID] ASC
)WITH (PAD_INDEX  = OFF, STATISTICS_NORECOMPUTE  = OFF, SORT_IN_TEMPDB = OFF, IGNORE_DUP_KEY = OFF, DROP_EXISTING = OFF, ONLINE = OFF, ALLOW_ROW_LOCKS  = ON, ALLOW_PAGE_LOCKS  = ON) ON [PRIMARY]
GO
CREATE NONCLUSTERED INDEX [IX_Employee_ModifiedDate_Demo_Heap] ON [Employee_Demo_Heap]
(
	[ModifiedDate] ASC
)WITH (PAD_INDEX  = OFF, STATISTICS_NORECOMPUTE  = OFF, SORT_IN_TEMPDB = OFF, IGNORE_DUP_KEY = OFF, DROP_EXISTING = OFF, ONLINE = OFF, ALLOW_ROW_LOCKS  = ON, ALLOW_PAGE_LOCKS  = ON) ON [PRIMARY]
GO
insert into Employee_Demo_Heap
	([EmployeeID], [NationalIDNumber], [ContactID], [LoginID], [ManagerID],
	 [Title], [BirthDate], [MaritalStatus], [Gender], [HireDate], [ModifiedDate])
select [EmployeeID] ,
	[NationalIDNumber] ,
	[ContactID] ,
	[LoginID] ,
	[ManagerID],
	[Title] ,
	[BirthDate] ,
	[MaritalStatus] ,
	[Gender] ,
	[HireDate] ,
	[ModifiedDate] from HumanResources.Employee
go


现在就用下面这组脚本模拟出一个死锁来。在一个连接里,运行下面的语句。反复开启事务。在这个事务里,先修改一条NationalIDNumber=‘480951955’的记录,然后再把它查询出来。做完以后,提交事务。
-- Deadlock simulation, session 1: in a tight loop, update then select the
-- row with NationalIDNumber '480951955' inside one transaction.
-- NOTE(review): there is no index on NationalIDNumber, so both statements
-- scan the heap; run alongside session 2 (a different row) and the two
-- sessions should deadlock -- confirm with trace flag 1222 output.
-- Runs forever; stop it manually.
set nocount on
go
while 1=1
begin
begin tran
update dbo.Employee_Demo_Heap
set BirthDate = getdate()
where NationalIDNumber = '480951955'
select * from dbo.Employee_Demo_Heap
where NationalIDNumber = '480951955'
commit tran
end
	在另外一个连接里,也运行这些语句。唯一的差别是这次修改和查询的是另一条NationalIDNumber = ‘407505660’的记录。
-- Deadlock simulation, session 2: identical to session 1 except it works on
-- the row with NationalIDNumber '407505660'. Run concurrently with session 1
-- to provoke deadlocks. Runs forever; stop it manually.
set nocount on
go
while 1=1
begin
begin tran
update dbo.Employee_Demo_Heap
set BirthDate = getdate()
where NationalIDNumber = '407505660'
select * from dbo.Employee_Demo_Heap 
where NationalIDNumber = '407505660'
commit tran
end



Warning: Undefined array key "HTTP_REFERER" in /www/wwwroot/prod/www.enjoyasp.net/wp-content/plugins/google-highlight/google-hilite.php on line 58

SCOPE_IDENTITY 返回为当前会话和当前作用域中的任何表最后生成的标识值。
@@IDENTITY 返回为当前会话的所有作用域中的任何表最后生成的标识值。
IDENT_CURRENT 返回为任何会话和任何作用域中的特定表最后生成的标识值。
 

若有触发器,且触发器有插入其它表,则@@IDENTITY返回的是其它表的自增值,而不是自己所想要的,故要
用SCOPE_IDENTITY(),IDENT_CURRENT有并发问题,不考虑。

@@identity不受并发影响。因为他只受当前会话限制。
但可能会受到当前会话其它作用域的限制。
比如ta上建有insert触发器,触发器内容为在ta上插入时,同时向tb插入。
那么这时@@identity就会有误,它取到的不是ta上的新增标识值,而是tb的。
或者存储过程嵌套也可能产生问题。
如果希望受当前会话,并且只取当前作用域的值的话那么使用scope_identity()函数。

 

如果插入的表未有插入触发器插入其它表记录生成新的id列,
用@@identity无问题,各有各的连接,互不相干.
用SCOPE_IDENTITY 回插入到同一作用域中的 IDENTITY 列内的最后一个 IDENTITY 值
用scope_identity也可

IDENT_CURRENT 不受作用域和会话的限制,而受限于指定的表。IDENT_CURRENT 返回为任何会话和作用域中的特定表所生成的值
ident_current会受并发影响.


Warning: Undefined array key "HTTP_REFERER" in /www/wwwroot/prod/www.enjoyasp.net/wp-content/plugins/google-highlight/google-hilite.php on line 58

1,存储过程在程序慢而在Management Studio快,分析查询计划,发现生成了两个执行计划,说明二者用的不是同一个查询计划,因参数嗅探问题引发此种情况。
2,产生不同执行计划原因是:引起执行计划重新编译的原因之一是上下文环境发生改变,SET开关。这些开关会影响语句执行的行为,甚至带来不同的结果,而sqlserver要另生成新的编译计划。而在Management Studio中,默认打开了SET ARITHABORT ON 在查询执行过程中发生溢出或被零除错误时,终止查询,而程序中是不打开的,不同的SET开关,造成不同编译计划的生成,而不是重用。
最终设置:看存储过程用的什么set,将Manage Studio工具-选项-查询执行-高级 打开或关掉工具
3,故要重用程序所生成的执行计划,方式是:1)确定SET设置 2)在当前中设置上下文环境

--查找SET开关
方法1:
-- Method 1: read the cached plan's SET-options bitmap (setopts) through the
-- sys.syscacheobjects compatibility view.
-- NOTE(review): syscacheobjects is a SQL 2000-era compatibility view; the
-- sys.dm_exec_plan_attributes approach below is the supported replacement.
SELECT sql,setopts
FROM sys.syscacheobjects
WHERE OBJECT_NAME(objid,DB_ID()) = 'frmUser_sel'

方法2:
-- Method 2: pull the set_options and sql_handle plan attributes for the
-- cached plan of 'frmUser_sel' in the current database, pivoted into columns.
SELECT plan_handle, usecounts, pvt.set_options, pvt.sql_handle
FROM (
    SELECT plan_handle, epa.attribute, epa.value, usecounts
    FROM sys.dm_exec_cached_plans
    CROSS APPLY sys.dm_exec_sql_text(plan_handle) qt
    OUTER APPLY sys.dm_exec_plan_attributes(plan_handle) AS epa
    WHERE cacheobjtype = 'Compiled Plan'
      AND OBJECT_NAME(objectid, dbid) = 'frmUser_sel'
      AND dbid = DB_ID()
    ) AS ecpa
-- FIX: bracketed identifiers work regardless of the session's
-- QUOTED_IDENTIFIER setting; the original double-quoted "set_options"
-- requires QUOTED_IDENTIFIER ON to parse.
PIVOT (MAX(ecpa.value) FOR ecpa.attribute IN ([set_options], [sql_handle])) AS pvt;
GO



--将获取的值传入,获取具体SET项
-- Decode a plan's set_options bitmap (captured above) into SET option names.
-- BUG FIX: the original declared @set_options but then tested an undeclared
-- variable @options, so the batch failed with "Must declare the scalar
-- variable" errors before printing anything.
DECLARE @set_options INT
SET @set_options = 251   -- paste the set_options value obtained above

IF ( (1 & @set_options) = 1 ) PRINT 'DISABLE_DEF_CNST_CHK'
IF ( (2 & @set_options) = 2 ) PRINT 'IMPLICIT_TRANSACTIONS'
IF ( (4 & @set_options) = 4 ) PRINT 'CURSOR_CLOSE_ON_COMMIT'
IF ( (8 & @set_options) = 8 ) PRINT 'ANSI_WARNINGS'
IF ( (16 & @set_options) = 16 ) PRINT 'ANSI_PADDING'
IF ( (32 & @set_options) = 32 ) PRINT 'ANSI_NULLS'
IF ( (64 & @set_options) = 64 ) PRINT 'ARITHABORT'
IF ( (128 & @set_options) = 128 ) PRINT 'ARITHIGNORE'
IF ( (256 & @set_options) = 256 ) PRINT 'QUOTED_IDENTIFIER'
IF ( (512 & @set_options) = 512 ) PRINT 'NOCOUNT'
IF ( (1024 & @set_options) = 1024 ) PRINT 'ANSI_NULL_DFLT_ON'
IF ( (2048 & @set_options) = 2048 ) PRINT 'ANSI_NULL_DFLT_OFF'
IF ( (4096 & @set_options) = 4096 ) PRINT 'CONCAT_NULL_YIELDS_NULL'
IF ( (8192 & @set_options) = 8192 ) PRINT 'NUMERIC_ROUNDABORT'
IF ( (16384 & @set_options) = 16384 ) PRINT 'XACT_ABORT'

-- 查看当前会话的 SET 选项:SELECT @@OPTIONS (参考:SET Options 文档)

4,上述说明了相同的存储过程参数,但产生不同的执行计划的原因,但关于速度不一样呢,原因是发生了参数嗅探,解决参数嗅探方法有:1)重新编译存储过程 sp_recompile 'sp' 2)在关键语句加:option(recompile) 3)指定值OPTION (OPTIMIZE FOR (@pid = 897))


Warning: Undefined array key "HTTP_REFERER" in /www/wwwroot/prod/www.enjoyasp.net/wp-content/plugins/google-highlight/google-hilite.php on line 58

ISNULL:
check_expression

将被检查是否为 NULL 的表达式。check_expression 可以为任何类型。

replacement_value

当 check_expression 为 NULL 时要返回的表达式。replacement_value 必须是可以隐式转换为 check_expression 类型的类型。

ISNULL:若前一个为NULL,时,会将后一个按前一个数据类型进行转换!
测试:

-- Demo: ISNULL types its result as the FIRST argument's type -- VARCHAR(10)
-- here -- so the 50-character replacement value is silently truncated.
-- (COALESCE would instead use the highest-precedence type, VARCHAR(50).)
DECLARE @s VARCHAR(10),@result VARCHAR(50)
           SET @result = REPLICATE(1,50)
           SELECT LEN(@result)             -- 50
           SELECT LEN(ISNULL(@s,@result))  -- 10: truncated to VARCHAR(10)