CGPROGRAM
#pragma vertex vert_img
#pragma fragment frag
#include "UnityCG.cginc"

uniform sampler2D _MainTex;
uniform sampler2D _MaskTex;
float4 _MyColor;

float4 frag(v2f_img i) : COLOR
{
    float4 layer = tex2D(_MainTex, i.uv);
    float4 mask = tex2D(_MaskTex, i.uv);
    float4 retur...
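The return statement is cut off in the excerpt above. A plausible completion, assuming the intent is to tint the layer by _MyColor where the mask is opaque (an assumption, not the original code; it reuses the _MainTex, _MaskTex and _MyColor declarations above), might look like this:

// a sketch only -- the real shader may combine the mask differently
float4 frag_masked_sketch(v2f_img i) : COLOR
{
    float4 layer = tex2D(_MainTex, i.uv);
    float4 mask  = tex2D(_MaskTex, i.uv);
    // blend toward the tinted layer where the mask alpha is high (illustrative only)
    return lerp(layer, layer * _MyColor, mask.a);
}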
#pragma fragment fragRGB

// _BlurAmount only takes part in the blending; it does not affect the alpha value
fixed4 fragRGB (v2f_img i) : SV_Target
{
    // v2f_img is a built-in struct that contains only pos and uv
    // the blur amount written to the alpha channel controls how the current screen
    // texture is blended with the accumulated (history) screen texture
    return fixed4(tex2D(_MainTex, i.uv).rgb, _BlurAmount);
}
ENDCG
} ...
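For reference, the built-in structs (and the vert_img vertex function referenced by #pragma vertex vert_img in other snippets here) are declared in UnityCG.cginc roughly as follows; recent Unity versions additionally insert instancing and stereo macros into these structs:

struct appdata_img
{
    float4 vertex   : POSITION;
    half2  texcoord : TEXCOORD0;
};

struct v2f_img
{
    float4 pos : SV_POSITION;
    half2  uv  : TEXCOORD0;
};

v2f_img vert_img (appdata_img v)
{
    v2f_img o;
    o.pos = UnityObjectToClipPos(v.vertex);
    o.uv  = v.texcoord;
    return o;
}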
v2f_img vert(appdata_img v) : POSITION
{
    v2f_img o;
    o.pos = mul(UNITY_MATRIX_MVP, v.vertex);
    o.uv = v.texcoord.xy;
    return o;
}

fixed4 frag(v2f_img i) : COLOR
{
    // Get the colors from the RenderTexture and the uv's
    // from the v2f_img struct
    fixed4 mainColor = tex2D(_MainT...
If your UI is based on IMGUI, the above approach is not possible because there are no GameObjects involved. In that case you can do the following instead:

public class MyPlugin : XPluginBase
{
    private GameObject _xua;
    private bool _lookedForXua;

    public void OnGUI()
    {
        // make sure we only do...
half4 frag_PreFilter (v2f_img i) : SV_Target
{
    half4 color = tex2D(_MainTex, i.uv);
    float br = max(max(color.r, color.g), color.b);
    br = max(0.0f, (br - _Threshold)) / max(br, 0.000001f);
    color.rgb *= br;
    return color;
    ...
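The division by max(br, 0.000001f) makes this a soft threshold on the brightest channel: with _Threshold = 0.5, for example, a pixel whose brightest channel is 0.8 keeps 0.3 / 0.8 = 37.5% of its color, while anything at or below 0.5 goes to black. The prefiltered result is then typically blurred and added back onto the scene in a final pass, along the lines of the sketch below (_BloomTex and _Intensity are assumed names, not from the original source):

sampler2D _MainTex;   // the original scene color
sampler2D _BloomTex;  // the blurred, prefiltered bright areas (assumed name)
half _Intensity;      // bloom strength (assumed name)

half4 frag_Composite (v2f_img i) : SV_Target
{
    half4 scene = tex2D(_MainTex, i.uv);
    half4 bloom = tex2D(_BloomTex, i.uv);
    scene.rgb += bloom.rgb * _Intensity;  // additive composite of the bloom
    return scene;
}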
// node color
v2f_img vert(appdata_img v)
{
    v2f_img o;
    // transform the vertex from object space to clip space,
    // equivalent to: mul(UNITY_MATRIX_MVP, v.vertex)
    o.pos = UnityObjectToClipPos(v.vertex);
    o.uv = v.texcoord;
    // scrolling the uv coordinates over time is what moves the nodes
    o.uv.x -= _Speed * _Time.y;
    return o;
}

fixed4 frag(v2f_img i) : ...
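The fragment function is cut off at its semantic. For a scrolling effect like this it would typically just sample _MainTex at the already-scrolled uv and tint it with the node color, roughly as in this sketch (the _Color property name is an assumption):

fixed4 frag(v2f_img i) : SV_Target
{
    // sample at the scrolled uv and apply the node color (illustrative only)
    fixed4 c = tex2D(_MainTex, i.uv);
    return c * _Color;
}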
Shader"DepthTexture/DepthTextureTest"{CGINCLUDE#include"UnityCG.cginc"sampler2D _CameraDepthTexture;fixed4frag_depth(v2f_img i):SV_Target{float depthTextureValue=SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture,i.uv);//float linear01EyeDepth = LinearEyeDepth(depthTextureValue) * _ProjectionParams.w;float li...
uniform sampler2D _MainTex;

fixed4 frag (v2f_img i) : SV_Target
{
    fixed4 myTex = tex2D(_MainTex, i.uv);
    ...
}

With stereo rendering:

uniform sampler2D _MainTex;
half4 _MainTex_ST;

fixed4 frag (v2f_img i) : SV_Target
{
    fixed4 myTex = tex2D(_MainTex, UnityStereoScreenSpaceUVAdjust(...
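The stereo variant is cut off mid-call. UnityStereoScreenSpaceUVAdjust takes the uv and the texture's scale/offset vector, so the completed sample would read roughly as follows:

uniform sampler2D _MainTex;
half4 _MainTex_ST;

fixed4 frag (v2f_img i) : SV_Target
{
    // remap the screen-space uv to the current eye's region of the texture
    fixed4 myTex = tex2D(_MainTex, UnityStereoScreenSpaceUVAdjust(i.uv, _MainTex_ST));
    return myTex;
}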
fixed4 frag(v2f_img i) : SV_Target
{
    // direction of the offset
    float2 dir = i.uv - _DistortCenter.xy;
    // final offset: direction * (1 - length); the farther from the center, the smaller the offset
    float2 offset = _DistortFactor * normalize(dir) * (1 - length(dir));
    // sampling uv: the normal uv plus a sampling distance that gradually increases from the center toward the edge
    ...
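The excerpt ends before the sample itself; following the comments above, the remaining body would normally be something like this sketch (assumed, not the original code; it reuses the shader's _MainTex, _DistortCenter and _DistortFactor properties):

fixed4 frag_distort_sketch(v2f_img i) : SV_Target
{
    float2 dir = i.uv - _DistortCenter.xy;
    float2 offset = _DistortFactor * normalize(dir) * (1 - length(dir));
    // sample the screen texture at the displaced uv
    return tex2D(_MainTex, i.uv + offset);
}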