 #include "dpu_hw_intf.h"
 #include "dpu_hw_ctl.h"
 #include "dpu_hw_dspp.h"
+#include "dpu_hw_dsc.h"
 #include "dpu_formats.h"
 #include "dpu_encoder_phys.h"
 #include "dpu_crtc.h"
@@ -135,6 +136,8 @@ enum dpu_enc_rc_states {
  * @cur_slave: As above but for the slave encoder.
  * @hw_pp: Handle to the pingpong blocks used for the display. No.
  *	pingpong blocks can be different than num_phys_encs.
+ * @hw_dsc: Handle to the DSC blocks used for the display.
+ * @dsc_mask: Bitmask of used DSC blocks.
  * @intfs_swapped: Whether or not the phys_enc interfaces have been swapped
  *	for partial update right-only cases, such as pingpong
  *	split where virtual pingpong does not generate IRQs
@@ -168,6 +171,7 @@ enum dpu_enc_rc_states {
  * @vsync_event_work: worker to handle vsync event for autorefresh
  * @topology: topology of the display
  * @idle_timeout: idle timeout duration in milliseconds
+ * @dsc: msm_display_dsc_config pointer, for DSC-enabled encoders
  */
 struct dpu_encoder_virt {
 	struct drm_encoder base;
@@ -180,6 +184,9 @@ struct dpu_encoder_virt {
 	struct dpu_encoder_phys *cur_master;
 	struct dpu_encoder_phys *cur_slave;
 	struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
+	struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
+
+	unsigned int dsc_mask;
 
 	bool intfs_swapped;
 
@@ -208,6 +215,9 @@ struct dpu_encoder_virt {
 	u32 idle_timeout;
 
 	bool wide_bus_en;
+
+	/* DSC configuration */
+	struct msm_display_dsc_config *dsc;
 };
 
 #define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
@@ -952,7 +962,9 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
 	struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
 	struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
 	struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL };
-	int num_lm, num_ctl, num_pp;
+	struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC];
+	int num_lm, num_ctl, num_pp, num_dsc;
+	unsigned int dsc_mask = 0;
 	int i;
 
 	if (!drm_enc) {
@@ -990,6 +1002,18 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
 		dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
 						: NULL;
 
+	if (dpu_enc->dsc) {
+		num_dsc = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+							drm_enc->base.id, DPU_HW_BLK_DSC,
+							hw_dsc, ARRAY_SIZE(hw_dsc));
+		for (i = 0; i < num_dsc; i++) {
+			dpu_enc->hw_dsc[i] = to_dpu_hw_dsc(hw_dsc[i]);
+			dsc_mask |= BIT(dpu_enc->hw_dsc[i]->idx - DSC_0);
+		}
+	}
+
+	dpu_enc->dsc_mask = dsc_mask;
+
 	cstate = to_dpu_crtc_state(crtc_state);
 
 	for (i = 0; i < num_lm; i++) {
@@ -1687,6 +1711,95 @@ static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
 			nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
 }
 
+static u32
+dpu_encoder_dsc_initial_line_calc(struct msm_display_dsc_config *dsc,
+				   u32 enc_ip_width)
+{
+	int ssm_delay, total_pixels, soft_slice_per_enc;
+
+	soft_slice_per_enc = enc_ip_width / dsc->drm->slice_width;
+
+	/*
+	 * minimum number of initial line pixels is a sum of:
+	 * 1. sub-stream multiplexer delay (83 groups for 8bpc,
+	 *    91 for 10 bpc) * 3
+	 * 2. for two soft slice cases, add extra sub-stream multiplexer * 3
+	 * 3. the initial xmit delay
+	 * 4. total pipeline delay through the "lock step" of encoder (47)
+	 * 5. 6 additional pixels as the output of the rate buffer is
+	 *    48 bits wide
+	 */
+	ssm_delay = ((dsc->drm->bits_per_component < 10) ? 84 : 92);
+	total_pixels = ssm_delay * 3 + dsc->drm->initial_xmit_delay + 47;
+	if (soft_slice_per_enc > 1)
+		total_pixels += (ssm_delay * 3);
+	return DIV_ROUND_UP(total_pixels, dsc->drm->slice_width);
+}
+
+static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_dsc *hw_dsc,
+				     struct dpu_hw_pingpong *hw_pp,
+				     struct msm_display_dsc_config *dsc,
+				     u32 common_mode,
+				     u32 initial_lines)
+{
+	if (hw_dsc->ops.dsc_config)
+		hw_dsc->ops.dsc_config(hw_dsc, dsc, common_mode, initial_lines);
+
+	if (hw_dsc->ops.dsc_config_thresh)
+		hw_dsc->ops.dsc_config_thresh(hw_dsc, dsc);
+
+	if (hw_pp->ops.setup_dsc)
+		hw_pp->ops.setup_dsc(hw_pp);
+
+	if (hw_pp->ops.enable_dsc)
+		hw_pp->ops.enable_dsc(hw_pp);
+}
+
+static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
+				 struct msm_display_dsc_config *dsc)
+{
+	/* coding only for 2LM, 2enc, 1 dsc config */
+	struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
+	struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
+	struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
+	int this_frame_slices;
+	int intf_ip_w, enc_ip_w;
+	int dsc_common_mode;
+	int pic_width;
+	u32 initial_lines;
+	int i;
+
+	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+		hw_pp[i] = dpu_enc->hw_pp[i];
+		hw_dsc[i] = dpu_enc->hw_dsc[i];
+
+		if (!hw_pp[i] || !hw_dsc[i]) {
+			DPU_ERROR_ENC(dpu_enc, "invalid params for DSC\n");
+			return;
+		}
+	}
+
+	dsc_common_mode = 0;
+	pic_width = dsc->drm->pic_width;
+
+	dsc_common_mode = DSC_MODE_MULTIPLEX | DSC_MODE_SPLIT_PANEL;
+	if (enc_master->intf_mode == INTF_MODE_VIDEO)
+		dsc_common_mode |= DSC_MODE_VIDEO;
+
+	this_frame_slices = pic_width / dsc->drm->slice_width;
+	intf_ip_w = this_frame_slices * dsc->drm->slice_width;
+
+	/*
+	 * dsc merge case: when using 2 encoders for the same stream,
+	 * no. of slices need to be same on both the encoders.
+	 */
+	enc_ip_w = intf_ip_w / 2;
+	initial_lines = dpu_encoder_dsc_initial_line_calc(dsc, enc_ip_w);
+
+	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
+		dpu_encoder_dsc_pipe_cfg(hw_dsc[i], hw_pp[i], dsc, dsc_common_mode, initial_lines);
+}
+
 void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
 {
 	struct dpu_encoder_virt *dpu_enc;
@@ -1718,6 +1831,9 @@ void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
 			dpu_encoder_helper_hw_reset(dpu_enc->phys_encs[i]);
 		}
 	}
+
+	if (dpu_enc->dsc)
+		dpu_encoder_prep_dsc(dpu_enc, dpu_enc->dsc);
 }
 
 void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
@@ -1963,6 +2079,8 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
 	dpu_enc->idle_pc_supported =
 			dpu_kms->catalog->caps->has_idle_pc;
 
+	dpu_enc->dsc = disp_info->dsc;
+
 	mutex_lock(&dpu_enc->enc_lock);
 	for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
 		/*
@@ -2193,3 +2311,11 @@ enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
 
 	return INTF_MODE_NONE;
 }
+
+unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc)
+{
+	struct drm_encoder *encoder = phys_enc->parent;
+	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);
+
+	return dpu_enc->dsc_mask;
+}
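
As an aside (not part of the patch itself): the arithmetic in dpu_encoder_dsc_initial_line_calc() above can be checked by hand. The sketch below mirrors that computation as a standalone C program with hypothetical DSC parameters -- 8 bpc, a 540-pixel slice width, an initial transmit delay of 512, and a 1080-pixel encoder input width (two soft slices per encoder). None of these values come from the patch; they are illustrative only.

```c
/* Standalone check of the initial-line calculation, assumed parameters. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int bits_per_component = 8;	/* assumed */
	int slice_width = 540;		/* assumed */
	int initial_xmit_delay = 512;	/* assumed */
	int enc_ip_width = 1080;	/* assumed: half of a 2160-wide interface */

	int soft_slice_per_enc = enc_ip_width / slice_width;	/* 2 */
	/* the code uses 84 groups for 8 bpc, 92 for 10 bpc, times 3 */
	int ssm_delay = (bits_per_component < 10) ? 84 : 92;
	int total_pixels = ssm_delay * 3 + initial_xmit_delay + 47;

	if (soft_slice_per_enc > 1)
		total_pixels += ssm_delay * 3;	/* extra mux delay for two slices */

	/* 84*3 + 512 + 47 + 84*3 = 1063 -> DIV_ROUND_UP(1063, 540) = 2 lines */
	printf("initial_lines = %d\n", DIV_ROUND_UP(total_pixels, slice_width));
	return 0;
}
```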
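Similarly, a minimal sketch of how the new dsc_mask is built and later read back through dpu_encoder_helper_get_dsc(): each allocated DSC block contributes one bit, offset from DSC_0, exactly as BIT(hw_dsc[i]->idx - DSC_0) does in the patch. The enum values and the two-block topology below are stand-ins for illustration, not the driver's actual definitions.

```c
/* Illustrative dsc_mask construction with stand-in enum values. */
#include <stddef.h>
#include <stdio.h>

#define BIT(nr)		(1U << (nr))
#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

enum dpu_dsc { DSC_0 = 1, DSC_1, DSC_2, DSC_3 };	/* assumed numbering */

int main(void)
{
	/* e.g. a 2-LM / 2-DSC topology where DSC_0 and DSC_1 were assigned */
	enum dpu_dsc assigned[] = { DSC_0, DSC_1 };
	unsigned int dsc_mask = 0;

	for (size_t i = 0; i < ARRAY_SIZE(assigned); i++)
		dsc_mask |= BIT(assigned[i] - DSC_0);

	/* dsc_mask == 0x3: bits 0 and 1 set, i.e. DSC_0 and DSC_1 in use */
	printf("dsc_mask = 0x%x\n", dsc_mask);
	return 0;
}
```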